import numpy as np
import matplotlib.pyplot as plt
import sdeint
from scipy.integrate import odeint
from mocu.utils.utils import *
from mocu.utils.costfunctions import *
from mocu.src.experimentaldesign import *
from mocu.src.mocu_utils import *
from mocu.scripts.visualizetoysystem import plot_points_in_full_space
from scipy.stats import multivariate_normal
import torch
import torch.nn as nn
import time
class DAG( nn.Module ):
def __init__( self , layers_list ):
super( DAG , self ).__init__()
n_layers = len( layers_list )
self.f_linear = nn.ModuleList( [ nn.Linear( layers_list[i] , \
layers_list[i+1] , bias=False ) \
for i in range(n_layers-1) ] )
def set_weight_ij_in_layer_k( self , k , i , j , value ):
f_idx = self.f_linear[ k ]
f_idx.weight.data[i,j] = float(value)
def multiply_weight_ij_in_layer_k( self , k , i , j , value ):
f_idx = self.f_linear[ k ]
f_idx.weight.data[i,j] *= float(value)
def forward(self,x):
for f_i in self.f_linear:
x = f_i( x )
return x
def set_model_weights_with_theta( self , theta , theta_kij ):
assert( len(theta_kij) == 3 )
assert( np.size(theta) == 1 )
k,i,j = theta_kij
self.set_weight_ij_in_layer_k( k , i , j , theta )
def set_model_weights_with_psi( self , psi , psi_kij ):
assert( len(psi_kij) == 3 )
assert( np.size(psi) == 1 )
k,i,j = psi_kij
self.multiply_weight_ij_in_layer_k( k , i , j , psi )
def set_model_weights_with_theta_and_psi( self , psi , theta , psi_kij , theta_kij ):
self.set_model_weights_with_theta( theta , theta_kij )
self.set_model_weights_with_psi( psi , psi_kij )
def get_num_layers_and_weights( self ):
k = 0
I = []
J = []
for f_i in self.f_linear:
ij = f_i.weight.shape
I.append( ij[0] )
J.append( ij[1] )
k += 1
return k , I , J
def set_dag_weights_from_theta( self , theta ):
# Assumes theta enumeration corresponds to row-major raveling
k,I,J = self.get_num_layers_and_weights( )
num_weights = np.sum( [ ii * jj for (ii,jj) in zip(I,J) ] )
assert( len(theta) == num_weights )
count = 0
for kk in range(k):
for ii in range(I[kk]):
for jj in range(J[kk]):
self.set_model_weights_with_theta( theta[count] , [kk,ii,jj] )
count += 1
def set_dag_weights_from_psi( self , psi ):
# Assumes psi enumeration corresponds to row-major raveling
k,I,J = self.get_num_layers_and_weights( )
num_weights = np.sum( [ ii * jj for (ii,jj) in zip(I,J) ] )
assert( len(psi) == num_weights )
count = 0
for kk in range(k):
for ii in range(I[kk]):
for jj in range(J[kk]):
self.set_model_weights_with_psi( psi[count] , [kk,ii,jj] )
count += 1
def dag_forward( dag ):
def dag_forward_round( x ):
y = dag( x ).detach().numpy()[0]
tol = 1e-5
if ( y > tol ):
y = 1
elif ( y < -tol ):
y = 0
else:
y = np.random.choice( [0,1] )
return y
return dag_forward_round
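# Added sketch (not in the original source): minimal DAG usage. The 2 -> 1
# layer sizes, weight values, and input below are illustrative assumptions.
def _demo_dag_forward():
    demo_dag = DAG([2, 1])
    demo_dag.set_weight_ij_in_layer_k(0, 0, 0, 1.0)   # weight on input 0
    demo_dag.set_weight_ij_in_layer_k(0, 0, 1, -1.0)  # weight on input 1
    f = dag_forward(demo_dag)
    x = torch.tensor([1.0, 0.0])
    return f(x)  # 1 if output > tol, 0 if output < -tol, else a coin flip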
def create_dag_cost_function( dag , n_mc , S , rho_S , psi_actions ):
dag_forward_round = dag_forward( dag )
def cost( theta , psi ):
assert( ( psi >= 0 ) & ( psi < len(psi_actions) ) )
psi = int(psi)
psi_action = psi_actions[ psi ]
dag.set_dag_weights_from_theta( theta )
dag.set_dag_weights_from_psi( psi_action )
J = 0.0
for i in range(n_mc):
xi = torch.tensor( S[ np.random.choice( np.arange(len(S)) , p=rho_S ) ] ).float()
y_model = dag_forward_round( xi )
desired_state = 0
J += np.abs( y_model - desired_state ) / float(n_mc)
return J
return cost
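# Added sketch (assumption, not original code): wiring the Monte-Carlo cost
# together. The states S, prior rho_S, and single psi action are placeholders.
def _demo_cost():
    demo_dag = DAG([2, 1])
    S = [(0.0, 0.0), (1.0, 1.0)]
    rho_S = [0.5, 0.5]
    psi_actions = [np.array([1.0, 0.0])]  # one action: keep w00, zero out w01
    cost = create_dag_cost_function(demo_dag, 100, S, rho_S, psi_actions)
    theta = np.array([1.0, -1.0])         # one entry per DAG weight
    return cost(theta, 0)                 # mean |y - desired_state| over draws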
def p(I,J,n,m):
assert( len(I) == len(J) )
P = np.ones([n,m])
for i in range( len(I) ):
P[I[i],J[i]] = 0
return P.ravel()
def make_random_psi( n , m , N , out ):
Psi_actions = []
for i in range(N):
lenI = np.random.choice( np.arange( n*m ) )
I = np.random.choice( np.arange(n) , lenI )
J = np.random.choice( np.arange(m) , lenI )
Psi_actions.append( np.append( p(I,J,n,m) , out ) )
return Psi_actions
def make_random_psi_diagonal( n , N , out ):
Psi_actions = []
for i in range(N):
lenI = np.random.choice( np.arange( n ) )
I = np.random.choice( np.arange(n) , lenI , replace=False )
Psi_actions.append( np.append( p(I,I,n,n) , out ) )
return Psi_actions
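# Added doctest-style sketch (illustrative values): p() returns a flattened
# n-by-m mask of ones with zeros at the listed (row, col) positions, and the
# make_random_psi* helpers append `out` so each action matches the full
# weight-vector layout used by set_dag_weights_from_psi.
# >>> p([0, 1], [1, 0], 2, 2)
# array([1., 0., 0., 1.])
# >>> make_random_psi_diagonal(3, 2, np.ones(3))[0].shape
# (12,)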
def make_random_experiments( n_theta , n_x , p_min , p_max ):
rho_y_given_x_theta = []
for i in range( n_x ):
ri = np.zeros( [n_theta , 2] )
ri[:,0] = p_min
ri[:,1] = p_max
idx_test = np.random.choice( np.arange(n_theta) )
ri[idx_test] = [p_max , p_min]
rho_y_given_x_theta.append( ri )
return rho_y_given_x_theta
def setup_dehghannasiri_test( ):
layers = [ 2 , 1 ]
dag = DAG( layers )
theta = [ (-1,-1) , (-1,1) , (1,-1) , (1,1) ]
rho_theta = [ 0.35 , 0.3 , 0.25 , 0.1 ]
Theta = dict(zip(['theta','rho_theta'] , [theta,rho_theta]))
Psi = [ 0 , 1 ]
Psi_actions = [ (1,0) , (0,1) ]
S = [ (0,0) , (0,1) , (1,0) , (1,1) ]
rho_S = [ 0.1 , 0.2 , 0.3 , 0.4 ]
X = [ 0 , 1 ]
Y = [ -1 , 1 ]
E = dict(zip(['X','Y'] , [X,Y]))
n_mc = 1000
cost = create_dag_cost_function( dag , n_mc , S , rho_S , Psi_actions )
rho_y_given_x1 = np.array( [ [0.6,0.4] , [0.6,0.4] , [0.2,0.8] , [0.2,0.8] ] )
rho_y_given_x2 = np.array( [ [0.8,0.2] , [0.4,0.6] , [0.8,0.2] , [0.4,0.6] ] )
rho_y_given_x_theta = [ rho_y_given_x1 , rho_y_given_x2 ]
theta_actual = theta[1]
run_real_system = lambda idx_x : np.random.choice( Y , p = rho_y_given_x_theta[idx_x][1] )
return dag , Theta , Psi , Psi_actions , S , rho_S , E , rho_y_given_x_theta , theta_actual , run_real_system
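# Added sketch (assumption): exercising the test setup above. run_real_system
# draws one noisy outcome y in {-1, 1} for a chosen experiment index.
def _demo_dehghannasiri():
    (dag, Theta, Psi, Psi_actions, S, rho_S, E,
     rho_y_given_x_theta, theta_actual, run_real_system) = setup_dehghannasiri_test()
    y = run_real_system(0)  # one observation of experiment x = 0
    return Theta['rho_theta'], y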
def select_experiment( n_mc=100 , n_experiment=10 , n_psi=10 ):
np.random.seed(0)
n_mc = int(n_mc)
layers = [ 10 , 10 , 1 ]
dag = DAG( layers )
t1 = np.diag( np.random.choice( [-1,1] , layers[0] ) )
t2 = np.diag( [ -1 , 1 , -1 , 1 , -1 , 1 , -1 , 1 , -1 , 1 ] )
t3 = np.diag( np.random.choice( [-1,1] , layers[0] ) )
t4 = np.diag( np.random.choice( [-1,1] , layers[0] ) )
out = np.ones( t1.shape[0] )
theta = [ np.append(t1.ravel(),out) , np.append(t2.ravel(),out) , np.append(t3.ravel(),out) , np.append(t4.ravel(),out) ]
rho_theta = [ 1./len(theta) for i in range(len(theta)) ]
Theta = dict(zip(['theta','rho_theta'] , [theta,rho_theta]))
Psi = np.arange( n_psi )
n = layers[0]; m = layers[1]
Psi_actions = make_random_psi_diagonal( n , len(Psi) , out )
S = [ np.random.choice( [0,1,-1] , layers[0] ) for i in range(4) ]
rho_S = [ 0.1 , 0.2 , 0.3 , 0.4 ]
X = np.arange(10)
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import numpy as np
class Box(object):
def __init__(
self,
width=1,
height=1,
length=1,
centerX=0,
centerY=0,
centerZ=0,
yaw=0.0,
pitch=0.0,
roll=0.0,
translationX=0,
translationY=0,
translationZ=0,
):
# In webots, length is along the z-axis, width along the x-axis, and height along the y-axis
# Center is the rotation center for the box
# -> in webots, this should be the rear axle location relative to the center of the box
# -> center is the vector from the true center of the box to the rotation center of the box
# In webots, yaw is counterclockwise around the y-axis!
# In webots, pitch is counterclockwise around the z-axis!
# In webots, roll is counterclockwise around the x-axis!
# NOTE: this geometry class applies a translation to reach the center of rotation,
# rotates the box, and then applies a global translation to place the box in a global coordinate system
self.dimensions = np.array([width, height, length])
self.center = np.array([centerX, centerY, centerZ])
self.translation = np.array([translationX, translationY, translationZ])
self.yaw = yaw
self.pitch = pitch
self.roll = roll
self.unrotatedegocorners = self._getunrotatedegocorners()
self.rotation = self.getyawrollpitchrotation(self.yaw, self.pitch, self.roll)
# The transpose is the inverse rotation matrix
self.reverserotation = np.transpose(self.rotation)
self.corners = self.getcorners()
def __str__(self):
return "[({},{},{}), center=({},{},{}), rotation=({},{},{}), translation=({},{},{})]".format(
self.dimensions[0],
self.dimensions[1],
self.dimensions[2],
self.center[0],
self.center[1],
self.center[2],
self.yaw,
self.pitch,
self.roll,
self.translation[0],
self.translation[1],
self.translation[2],
)
def getyawrollpitchrotation(self, yaw, pitch, roll):
sin_p = math.sin(pitch)
cos_p = math.cos(pitch)
sin_y = math.sin(yaw)
cos_y = math.cos(yaw)
sin_r = math.sin(roll)
cos_r = math.cos(roll)
return np.array(
[
[
cos_p * cos_y,
cos_p * sin_y * sin_r - sin_p * cos_r,
cos_p * sin_y * cos_r + sin_p * sin_r,
],
[
sin_p * cos_y,
sin_p * sin_y * sin_r + cos_p * cos_r,
sin_p * sin_y * cos_r - cos_p * sin_r,
],
[-sin_y, cos_y * sin_r, cos_y * cos_r],
]
)
def _getunrotatedegocorners(self):
x_diff1, y_diff1, z_diff1 = -self.dimensions / 2.0 - self.center
x_diff2, y_diff2, z_diff2 = self.dimensions / 2.0 - self.center
x1, y1, z1 = [
min(x_diff1, x_diff2),
min(y_diff1, y_diff2),
min(z_diff1, z_diff2),
]
x2, y2, z2 = [
max(x_diff1, x_diff2),
max(y_diff1, y_diff2),
max(z_diff1, z_diff2),
]
corners = np.array(
[
[x1, y1, z1],
[x1, y1, z2],
[x1, y2, z1],
[x1, y2, z2],
[x2, y1, z1],
[x2, y1, z2],
[x2, y2, z1],
[x2, y2, z2],
]
)
return corners
def getcorners(self):
corners = self._getunrotatedegocorners()
if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
corners = np.inner(corners, self.rotation)
corners += self.translation
return corners
def getvolume(self):
return np.prod(self.dimensions)
def containspoint(self, point):
return self.containspoints(np.array([point]))
def containspoints(self, points):
# 1.) Rotate the point around the center
# 2.) Check to see if the points lie inside the co-linear rectangle
N, d = points.shape
ego_points = points - self.translation
if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
rotated_points = np.inner(ego_points, self.reverserotation)
else:
rotated_points = ego_points
low_corner = self.unrotatedegocorners[0]
high_corner = self.unrotatedegocorners[7]
# This is why we rotate the points rather than the box -> it is simpler to test
# against a box that is axis-aligned in the local coordinate system
return np.all(
np.logical_and(
(high_corner >= rotated_points), (rotated_points >= low_corner)
),
axis=1,
)
# NOTE: not to be used externally
def _unrotated_containspoints(self, unrotated_points):
low_corner = self.unrotatedegocorners[0]
high_corner = self.unrotatedegocorners[7]
# This is why we rotate the points rather than the box -> it is simpler to test
# against a box that is axis-aligned in the local coordinate system
return np.all(
np.logical_and(
(high_corner >= unrotated_points), (unrotated_points >= low_corner)
),
axis=1,
)
def _getnormals(self):
# Just need three normals of the unrotated box
p1, p2, p3, p4, p5, p6, p7, p8 = self.unrotatedegocorners
xn = np.cross(p3 - p1, p2 - p1)
yn = np.cross(p2 - p1, p5 - p1)
zn = np.cross(p5 - p1, p3 - p1)
return xn, yn, zn
def getlines(self):
p1, p2, p3, p4, p5, p6, p7, p8 = self.corners
start_points = np.array([p1, p1, p1, p2, p2, p3, p3, p4, p5, p5, p6, p7])
end_points = np.array([p2, p3, p5, p4, p6, p7, p4, p8, p6, p7, p8, p8])
return start_points, end_points
def intersects(self, box):
# NOTE: the order of the points in self.corners and self.unrotatedegocorners must not change!
# Check whether any edges of box intersect self
start1, end1 = box.getlines()
intersect1 = self.intersectswithlines(points=start1, end_points=end1)
# Also need to check whether any edges of self intersect box
start2, end2 = self.getlines()
intersect2 = box.intersectswithlines(points=start2, end_points=end2)
return np.any(np.concatenate((intersect1, intersect2)))
# Calculates the intersection parameters between a set of lines and two parallel
# planes through pts_on_plane1 and pts_on_plane2 respectively
# norm must be the outward normal of plane1 (the plane through pts_on_plane1)
def _get_line_intersect_with_planes_3d(
self, points, directions, norm, pts_on_plane1, pts_on_plane2
):
r = directions
n1 = norm
n2 = -norm
d1 = -np.inner(n1, pts_on_plane1[0])
d2 = -np.inner(n2, pts_on_plane2[0])
r_n1 = np.inner(r, n1)
r_n2 = np.inner(r, n2)
n1_px = np.inner(n1, points) + d1
n2_px = np.inner(n2, points) + d2
n1_p = np.inner(n1, points)
n2_p = np.inner(n2, points)
t1 = np.zeros(len(points))
t2 = np.zeros(len(points))
# Check for parallel
z1 = np.abs(r_n1) < 1e-20
z2 = np.abs(r_n2) < 1e-20
nz1 = np.logical_not(z1)
nz2 = np.logical_not(z2)
# Check for points on plane
on1 = np.abs(n1_px) < 1e-20
on2 = np.abs(n2_px) < 1e-20
non1 = np.logical_not(on1)
non2 = np.logical_not(on2)
# Lines parallel to the plane that do not lie on it never intersect -> inf
t1[np.logical_and(z1, non1)] = -np.inf
t2[np.logical_and(z2, non2)] = np.inf
# Lines that are neither parallel to the plane nor on it
nz_non1 = np.logical_and(nz1, non1)
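# Added sketch (not from the original file, which is truncated above): checking
# whether points fall inside a yawed box. All values are illustrative.
def _demo_box_contains():
    box = Box(width=2, height=1, length=4, yaw=math.pi / 4)
    pts = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
    return box.containspoints(pts)  # array([ True, False])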
import numpy as np
from scipy.stats import dirichlet, multivariate_normal
from data_generation import *
from matplotlib.colors import to_rgb
from tqdm import tqdm
import math
LOG_EPSILON = 10E-10
MIN_VARIANCE = 10E-3
CONVERGENCE_ERROR = 10E-5
MOVING_AVERAGE_WINDOW = 3
def random_initialization(data, num_components, seed=None):
"""
Initializes parameters randomly.
:param data: observed data
:param num_components: number of components
:param seed: random seed
:return:
"""
dim = data.shape[1]
alpha = np.ones(num_components)
mixture_weights = dirichlet.rvs(alpha, size=1, random_state=seed)[0]
min_values = np.min(data, axis=0)
max_values = np.max(data, axis=0)
# Means are generated randomly within the data range
means = list((max_values - min_values) * np.random.rand(num_components, dim) + min_values)
covariances = [0.25 * np.diag(np.abs((max_values - min_values) * np.random.rand(dim) + min_values)) for _ in range(num_components)]
return mixture_weights, means, covariances
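# Added sketch (illustrative, 2-D data assumed): one random GMM initialization.
def _demo_random_init():
    data = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5.0])
    weights, means, covariances = random_initialization(data, 2, seed=0)
    return weights.shape, len(means), covariances[0].shape  # (2,), 2, (2, 2)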
def get_log_likelihood(data, mixture_weights, means, covariances):
"""
Calculates the log-likelihood of the data given a GMM's parameters.
:param data: observed data
:param mixture_weights: mixture weights
:param means: mean per component
:param covariances: covariance per component
:return: log-likelihood
"""
num_points = data.shape[0]
num_components = mixture_weights.size
log_likelihood = np.zeros((num_points, num_components))
for component in range(num_components):
log_likelihood[:, component] = mixture_weights[component] * multivariate_normal.pdf(data, means[component],
covariances[component])
likelihood = np.sum(log_likelihood, axis=1)
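# Added sketch (assumption; the function above is truncated): a standard finish
# takes the log of each point's mixture likelihood and sums over points, using
# LOG_EPSILON to guard against log(0).
def _gmm_log_likelihood(data, mixture_weights, means, covariances):
    likelihood = np.zeros(data.shape[0])
    for component in range(mixture_weights.size):
        likelihood += mixture_weights[component] * multivariate_normal.pdf(
            data, means[component], covariances[component])
    return np.sum(np.log(likelihood + LOG_EPSILON))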
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
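# Added helper (not in the original notebook): undo the standardization with
# the stored (mean, std) pairs, e.g. to report predictions in original units.
def unscale(values, feature):
    mean, std = scaled_features[feature]
    return values * std + mean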
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1.0 / (1.0 + np.exp(-x))  # sigmoid
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
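# Added sketch (assumption; train() is truncated above): the forward pass this
# template expects, written as a standalone helper for clarity.
def _forward_pass(X, w_input_hidden, w_hidden_output, activation):
    hidden_inputs = np.dot(X, w_input_hidden)      # signals into hidden layer
    hidden_outputs = activation(hidden_inputs)     # signals from hidden layer
    final_inputs = np.dot(hidden_outputs, w_hidden_output)
    final_outputs = final_inputs                   # regression: identity output
    return hidden_outputs, final_outputs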
import unittest
import numpy as np
from spectralcluster import autotune
from spectralcluster import configs
from spectralcluster import constraint
from spectralcluster import laplacian
from spectralcluster import refinement
from spectralcluster import spectral_clusterer
from spectralcluster import utils
RefinementName = refinement.RefinementName
ThresholdType = refinement.ThresholdType
SymmetrizeType = refinement.SymmetrizeType
LaplacianType = laplacian.LaplacianType
ConstraintName = constraint.ConstraintName
IntegrationType = constraint.IntegrationType
EigenGapType = utils.EigenGapType
ICASSP2018_REFINEMENT_SEQUENCE = configs.ICASSP2018_REFINEMENT_SEQUENCE
class TestSpectralClusterer(unittest.TestCase):
"""Tests for the SpectralClusterer class."""
def setUp(self):
super().setUp()
pass
def test_6by2_matrix(self):
matrix = np.array([
[1.0, 0.0],
[1.1, 0.1],
[0.0, 1.0],
[0.1, 1.0],
[0.9, -0.1],
[0.0, 1.2],
])
refinement_options = refinement.RefinementOptions(
gaussian_blur_sigma=0,
p_percentile=0.95,
refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
clusterer = spectral_clusterer.SpectralClusterer(
refinement_options=refinement_options)
labels = clusterer.predict(matrix)
labels = utils.enforce_ordered_labels(labels)
expected = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(expected, labels))
def test_1000by6_matrix(self):
matrix = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400 +
[[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300 +
[[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200 +
[[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)
noisy = np.random.rand(1000, 6)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from threshold import binarize_with_threshold
from perspective_transform import perspective_transform
def viz1(binary_warped, ret, save_file=None):
"""
Visualize each sliding window location and predicted lane lines, on binary warped image
save_file is a string representing where to save the image (if None, then just display)
binary_warped: input warped image
ret: returns from fit_line
"""
# Grab variables from ret dictionary
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
out_img = ret['out_img']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
def viz2(binary_warped, ret, margin=100, save_file=None):
"""
Visualize the predicted lane lines with margin, on binary warped image
save_file is a string representing where to save the image (if None, then just display)
"""
# Grab variables from ret dictionary
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Create an image to draw on and an image to show the selection window
out_img = (np.dstack((binary_warped, binary_warped, binary_warped)) * 255).astype('uint8')
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.imshow(result)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
def final_viz(img_undistorted, Minv, line_lt, line_rt, keep_state):
"""
Draw both the drivable lane area and the detected lane-lines onto the original (undistorted) frame.
:param img_undistorted: original undistorted color frame
:param Minv: (inverse) perspective transform matrix used to re-project on original frame
:param line_lt: left lane-line previously detected
:param line_rt: right lane-line previously detected
:param keep_state: if True, line state is maintained
:return: color blend
"""
height, width, _ = img_undistorted.shape
left_fit = line_lt.average_fit if keep_state else line_lt.last_fit_pixel
right_fit = line_rt.average_fit if keep_state else line_rt.last_fit_pixel
# Generate x and y values for plotting
ploty = np.linspace(0, height - 1, height)
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# TODO 10: draw road
# draw road as green polygon on original frame
road_warp = np.zeros_like(img_undistorted, dtype=np.uint8)
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
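# Added sketch (assumption; final_viz is truncated above): the usual remaining
# steps fill the lane polygon, warp it back to camera view with Minv, and
# alpha-blend it onto the undistorted frame.
def _blend_lane(img_undistorted, road_warp_birdeye, Minv):
    h, w = img_undistorted.shape[:2]
    road_dewarped = cv2.warpPerspective(road_warp_birdeye, Minv, (w, h))
    return cv2.addWeighted(img_undistorted, 1.0, road_dewarped, 0.3, 0)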
#!/usr/bin/env python
"""Simulate data from parameterized models"""
import numpy as np
import math
import random
__author__ = "yasc"
__date_created__ = "06 July 2016"
def prior_draw(n_regressors, s, density="uniform", beta_range=[-2, 2],
sigma_range=[0, 5]):
"""Return parameters drawn from a prior distributon."""
if density == "uniform":
beta = beta_range[0]+(beta_range[1]-beta_range[0])*np.random.rand(
n_regressors+1, s)
sigma = sigma_range[1]*np.random.rand(1, s)
return np.matrix(np.vstack((beta, sigma)))
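# Added doctest-style sketch (illustrative): s = 3 draws for 2 regressors give
# a (n_regressors + 2) x s matrix, rows [beta_0, beta_1, beta_2, sigma].
# >>> prior_draw(n_regressors=2, s=3).shape
# (4, 3)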
def sim_lin_reg(beta, sigma, obs_x, s, intercept=True):
"""Return data simulated from a linear model."""
# Transpose the vector containing the beta parameters if necessary.
if beta.shape[1] > 1:
beta = beta.T
u = np.random.randn(obs_x.shape[0], s)
# -*- coding: utf-8 -*-
__author__ = 'Stéphane-Poirier'
from graph import Graph
import numpy as np
class DiffGraph(Graph):
def __init__(self, tuple_of_diffs_lists):
self.nb_colors = len(tuple_of_diffs_lists)
all_diffs = []
diffs_as_list = []
for c in tuple_of_diffs_lists:
all_diffs.extend(c)
diffs_as_list.append(tuple(sorted(c)))
self.diffs_tuple = tuple(diffs_as_list)
all_diffs.sort()
diffs_lists_ok = True
expected_d = 1
for d in all_diffs:
if d != expected_d:
diffs_lists_ok = False
break
else:
expected_d += 1
if not diffs_lists_ok:
print("from_diffs Error : tuple_of_diffs_lists should be complete (each value from 1 to n present one time)")
print(" expected color is {} get {}".format(expected_d, d))
raise ValueError
# tuple_of_diffs_lists is ok, create Graph
self.nb_vertices = len(all_diffs) + 1
self.adjacencies = np.full((self.nb_vertices, self.nb_vertices), -1, dtype=np.int8)
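# Added sketch (assumption; the constructor is truncated above): a natural next
# step colors edge (i, j) by the color whose difference list contains j - i:
#
# for i in range(self.nb_vertices):
#     for j in range(i + 1, self.nb_vertices):
#         for color, diffs in enumerate(self.diffs_tuple):
#             if j - i in diffs:
#                 self.adjacencies[i, j] = self.adjacencies[j, i] = color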
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
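# Added doctest-style sketch (illustrative): symmetry-equivalent reflections
# under P -1. The inversion centre maps (h, k, l) to (-h, -k, -l) with unit
# phase factors.
# >>> hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
# ...     N.array([1, 2, 3]))
# >>> hkls
# array([[ 1,  2,  3],
#        [-1, -2, -3]])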
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
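# Editorial note: in centered settings such as the body-centered group above,
# the second half of `transformations` repeats the first half with the
# centering translation (here (1/2, 1/2, 1/2)) added to each operation.
# A hypothetical sketch of that expansion, using exact fraction arithmetic
# on the (num, den) integer arrays:
def _with_centering(ops, c_num, c_den):
    centered = []
    for rot, t_num, t_den in ops:
        # a/b + c/d = (a*d + c*b) / (b*d), elementwise and left unreduced.
        centered.append((rot, t_num * c_den + c_num * t_den, t_den * c_den))
    return ops + centered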
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
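
# Sketch of the centring convention apparent in the block above: face-centred
# (F) groups repeat their eight primitive operations once per centring vector
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), giving the 32 entries of
# 'F m m m'.  An expansion of this kind could be generated as below; the
# fractions are added exactly but not reduced to lowest terms, so results are
# value-equal to, though not always digit-identical with, the stored entries.
def expand_by_centering(primitive_ops, centering_vectors):
    """Repeat (rot, num, den) triples with each centring translation added;
    centering_vectors is a list of (num, den) integer-array pairs."""
    expanded = []
    for c_num, c_den in centering_vectors:
        for rot, num, den in primitive_ops:
            # num/den + c_num/c_den = (num*c_den + c_num*den) / (den*c_den)
            expanded.append((rot, num * c_den + c_num * den, den * c_den))
    return expanded
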
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
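
# Illustrative check: 'P 4' is generated by the fourfold rotation
# (x,y,z) -> (-y,x,z), so applying the generator matrix four times must
# reproduce the identity (assuming N is NumPy):
def _is_fourfold(rot):
    rot2 = N.dot(rot, rot)
    return N.all(N.dot(rot2, rot2) == N.identity(3, int))
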
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
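
# Illustrative sketch: 'P 41', 'P 42' and 'P 43' differ only in the pitch of
# the fourfold screw along z (z+1/4, z+1/2 and z+3/4 per step); composing the
# screw generator with itself four times must give a pure lattice translation.
# A minimal check, assuming N is NumPy and trans is the translation as floats:
def _fourth_power(rot, trans):
    """Return the rotation and translation of (rot|trans) applied four
    times; for a valid screw generator this is the identity matrix and an
    integer vector."""
    r, t = N.identity(3, int), N.zeros(3)
    for _ in range(4):
        t = N.dot(rot, t) + trans
        r = N.dot(rot, r)
    return r, t
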
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
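
# Illustrative note: 'P -4' is generated by the fourfold rotoinversion
# (x,y,z) -> (y,-x,-z).  Unlike the proper rotations of 'P 4', its matrix has
# determinant -1, which is what distinguishes improper operations in these
# tables (a sketch, assuming N is NumPy):
def _is_improper(rot):
    return N.linalg.det(rot) < 0
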
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
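
# Illustrative sketch: 'P 4/m' is centrosymmetric -- its list contains the
# inversion -x,-y,-z with zero translation, and the second half of the list
# is the first half composed with -I.  A simple presence test for the
# inversion at the origin (assuming N is NumPy):
def _has_inversion_at_origin(transformations):
    for rot, trans_num, trans_den in transformations:
        if N.all(rot == -N.identity(3, int)) and not N.any(trans_num):
            return True
    return False
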
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
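
# Sketch of a possible normalization: centred entries such as (3/4, 5/4, 5/4)
# in 'I 41/a :2' above store the raw sum of a primitive translation and the
# centring vector without wrapping into [0,1).  If reduced translations are
# wanted, something like the following could be used (assuming N is NumPy;
# Python's modulo maps negative numerators such as -1/4 to 3/4):
def _reduced_translation(trans_num, trans_den):
    return (trans_num % trans_den) / N.asarray(trans_den, dtype=float)
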
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
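
# Illustrative sketch: beyond the fourfold, the '422' groups starting here add
# twofold rotations about [100], [010], [110] and [1-10], visible above as the
# matrices for (x,-y,-z), (-x,y,-z), (y,x,-z) and (-y,-x,-z).  The axis
# direction of a proper twofold is its eigenvector with eigenvalue +1
# (assuming N is NumPy):
def _twofold_axis_direction(rot):
    vals, vecs = N.linalg.eig(N.asarray(rot, dtype=float))
    i = N.argmin(N.abs(vals - 1.0))
    return N.real(vecs[:, i])
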
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
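
# Usage note: as with every group in this file, the registration above makes
# the same SpaceGroup object reachable both by International Tables number
# and by Hermann-Mauguin symbol (illustrative):
def _lookup_demo():
    """Number and symbol keys resolve to the identical object."""
    return space_groups[91] is space_groups['P 41 2 2']
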
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
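# Space group 96: P 43 21 2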
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
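# Space group 97: I 4 2 2 (body-centred: the first eight operators are
# repeated with the centring translation (1/2, 1/2, 1/2) added)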
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
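# Space group 98: I 41 2 2 (centred translations are stored unreduced,
# e.g. (1, 1/2, 5/4) rather than (0, 1/2, 1/4) mod 1)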
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
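# Space group 99: P 4 m m (symmorphic: all translations zero)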
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
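# Space group 100: P 4 b m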
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
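# Space group 101: P 42 c m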
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
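# Space group 102: P 42 n m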
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
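# Space group 103: P 4 c c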
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
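# Space group 104: P 4 n c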
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
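# Space group 105: P 42 m c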
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
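# Space group 106: P 42 b c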
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
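# Space group 107: I 4 m m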
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
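# Space group 108: I 4 c m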
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
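# Space group 109: I 41 m d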
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
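# Space group 110: I 41 c d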
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
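# Space group 111: P -4 2 m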
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
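# Space group 112: P -4 2 c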
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
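# Space group 113: P -4 21 m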
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
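# Space group 114: P -4 21 c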
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
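# Space group 115: P -4 m 2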
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
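# Space group 116: P -4 c 2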
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
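# Space group 117: P -4 b 2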
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
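# Space group 118: P -4 n 2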
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
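# Space group 119: I -4 m 2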
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
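# Space group 120: I -4 c 2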
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
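# Space group 121: I -4 2 m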
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
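# Space group 122: I -4 2 d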
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
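# Space group 123: P 4/m m m (full tetragonal holohedry: 16 point
# operations, all translations zero)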
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
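# Lookup sketch: every group is registered twice, so entries can be
# fetched either by International Tables number or by Hermann-Mauguin
# symbol (illustrative, mirroring the registrations above):
#   space_groups[123] is space_groups['P 4/m m m']   # -> True

# Space group 124: P 4/m c c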
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
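# Naming note: the ':2' suffix on symbols such as 'P 4/n m m :2' follows the
# International Tables convention and marks origin choice 2 (origin at a centre
# of inversion) for groups with two origin settings; the bare number (here 129)
# is registered alongside the full symbol, as above.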
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
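# Centring note: for the body-centred groups (symbols beginning with 'I', as in
# 'I 4/m m m' above), the generated list stores each operation twice, once as-is
# and once with the centring translation (1/2, 1/2, 1/2) added, and the sums are
# left unreduced where they reach 1 (see e.g. the (1/2, 1/2, 1) translations in
# 'I 4/m c m' below). A hypothetical sketch of the reduction, should canonical
# translations modulo the lattice be needed:
#
# def reduced_translation(trans_num, trans_den):
#     t = trans_num / (1.0 * trans_den)
#     return t - N.floor(t)   # map each component into [0, 1)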
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
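# Usage note (illustrative, not part of the table): every group is registered
# under both keys, so space_groups[141] and space_groups['I 41/a m d :2'] refer
# to the same SpaceGroup instance.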
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
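# Space group 143: P 3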
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
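# Space group 144: P 31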
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
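# Space group 145: P 32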
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
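# Space group 146: R 3 :H (hexagonal axes)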
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
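# Space group 147: P -3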
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
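# Space group 148: R -3 :H (hexagonal axes)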
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
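# Space group 149: P 3 1 2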
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
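# Space group 150: P 3 2 1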
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
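# Space group 151: P 31 1 2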
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
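# Space group 152: P 31 2 1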
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
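# Space group 153: P 32 1 2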
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
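# Space group 154: P 32 2 1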
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
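# Space group 155: R 3 2 :H (hexagonal axes)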
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
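# Space group 156: P 3 m 1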
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
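# Space group 157: P 3 1 m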
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
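# Space group 158: P 3 c 1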
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
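# Space group 159: P 3 1 c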
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
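# Space group 160: R 3 m :H (hexagonal axes)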
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
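# Space group 161: R 3 c :H (hexagonal axes)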
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
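# Space group 162: P -3 1 m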
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
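# Space group 163: P -3 1 c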
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
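# Space group 164: P -3 m 1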
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
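# Space group 165: P -3 c 1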
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
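# Space group 166: R -3 m :H (hexagonal axes)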
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
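# Space group 167: R -3 c :H (hexagonal axes)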
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
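# Space group 168: P 6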
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
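# Space group 169: P 61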
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
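# Space group 170: P 65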
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
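# 'P 61' (169) and 'P 65' (170, above) form an enantiomorphic pair: their
# rotations are identical and only the sense of the screw translation
# along c differs (z+1/6 vs z+5/6 for the first non-trivial operation).
# The same relationship holds for 'P 62'/'P 64' below (z+1/3 vs z+2/3).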
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
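# In the '6 m m' groups that follow, the second set of six operations are
# reflections (determinant -1 matrices that leave z unchanged), possibly
# combined with a c/2 translation (glides), in contrast to the proper
# two-fold rotations (z -> -z) of the '6 2 2' groups above.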
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
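# 'P 63/m m c' (194) closes the hexagonal family; the groups that follow,
# starting with 'P 2 3' (195), are cubic, and their rotation matrices now
# permute the x, y and z axes (e.g. the cyclic permutation [0,0,1,1,0,0,0,1,0]).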
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
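# Hedged usage sketch: the space_groups dict built above is keyed both by
# International Tables number and by Hermann-Mauguin symbol, e.g.
#
#   sg = space_groups['P 2 3']    # same object as space_groups[195]
#
# Each (rot, trans_num, trans_den) tuple encodes a symmetry operation acting
# on fractional coordinates as x' = rot.dot(x) + trans_num / trans_den; how
# the SpaceGroup class exposes these tuples is an assumption here, since its
# definition is not shown.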
"""
Script to compare the beta values generated by scikit learn vs the ones generated by our function.
The dataset used is the Spam dataset from the book, elements of statistical learning.
"""
import numpy as np
import pandas as pd
import scipy.linalg
import sklearn.preprocessing
import sklearn.svm
import Svm
# Get the data from the Spam Dataset
spam = pd.read_table('https://web.stanford.edu/~hastie/ElemStatLearn/datasets/spam.data', sep=' ', header=None)
test_indicator = pd.read_table('https://web.stanford.edu/~hastie/ElemStatLearn/datasets/spam.traintest', sep=' ',header=None)
# Store the features and labels as an array
x = np.asarray(spam)[:, 0:-1]
y = np.asarray(spam)[:, -1]*2 - 1
# Use the train-test split indicator provided along with the dataset
test_indicator = np.array(test_indicator).T[0]
x_train = x[test_indicator == 0, :]
x_test = x[test_indicator == 1, :]
y_train = y[test_indicator == 0]
y_test = y[test_indicator == 1]
# Standardize the data
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Compare the betas and objective values generated by scikit-learn and our code
lambduh = 1
svm = Svm.Svm(max_iter = 50)
d = x_train.shape[1]
beta_init = np.zeros(d)
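# Hedged sketch of the comparison step itself (the fit interface of the local
# Svm class is an assumption -- its definition is not shown above -- and the C
# value maps lambduh onto scikit-learn's convention only up to the loss
# normalisation):
#
#   betas_ours = svm.fit(x_train, y_train, lambduh, beta_init)
#   clf = sklearn.svm.LinearSVC(C=1.0 / (2 * lambduh * x_train.shape[0]))
#   clf.fit(x_train, y_train)
#   print(np.max(np.abs(betas_ours - clf.coef_.ravel())))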
# -*- coding: utf-8 -*-
from numpy.lib.function_base import rot90
from scipy.spatial.distance import cdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn import mixture
from collections import Counter
import json
import random
import numpy as np
from sklearn.metrics import euclidean_distances
import ot
import os
import joblib
from ot.optim import line_search_armijo
def norm_max(x):
for i in range(x.shape[1]):
tmax = x[:, i].max()
x[:, i] = x[:, i] / tmax
return x
def load_from_file(root_dir, filename, ss, ts):
f1 = root_dir + filename
with open(f1, 'r') as f:
s = f.read()
data = json.loads(s)
xs, ys, xt, yt = np.array(data[ss]['x']), np.array(data[ss]['y']), np.array(data[ts]['x']), np.array(
data[ts]['y'])
xs = norm_max(xs)
xt = norm_max(xt)
ys = np.squeeze(ys)
yt = np.squeeze(yt)
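# Shift labels so class indices start at 0 in both source and target domains.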
ttty = min(Counter(ys).keys())
ys = ys - ttty
yt = yt - ttty
return xs, ys, xt, yt
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
def gmm_target(X, w, c, rootdir, filepath, modelpath, targetname, covtype='diag'):
if not os.path.exists(rootdir + 'data/'):
os.mkdir(rootdir + 'data/')
if not os.path.exists(rootdir + 'model/'):
os.mkdir(rootdir + 'model/')
if os.path.exists(filepath):
pass
else:
gmm = mixture.GaussianMixture(n_components=c, covariance_type=covtype)
gmm.fit(X)
x1 = []
x2 = []
xmu = []
for i in range(len(gmm.weights_)):
xmu.append(gmm.weights_[i] * w)
x1.append(gmm.means_[i])
x2.append(np.sqrt(gmm.covariances_[i]))
data = {'xntmu': np.array(xmu), 'xnt1': np.array(x1), 'xnt2': np.array(x2)}
record = {}
record[targetname] = data
with open(filepath, 'w') as f:
json.dump(record, f, cls=MyEncoder)
joblib.dump(gmm, modelpath)
def gmm_source_class(X, w, slist, covtype='diag'):
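# Pick the number of mixture components for one class by scanning slist and
# stopping once the BIC stops decreasing, then refit with the chosen size.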
bicr = 10000000000
c = 0
gmm = mixture.GaussianMixture(n_components=slist[c], covariance_type=covtype)
gmm.fit(X)
while (c + 1 < len(slist)) and (gmm.bic(X) < bicr):
c += 1
bicr = gmm.bic(X)
gmm = mixture.GaussianMixture(n_components=slist[c], covariance_type=covtype)
gmm.fit(X)
c = c - 1
gmm = mixture.GaussianMixture(n_components=slist[c], covariance_type=covtype)
gmm.fit(X)
x1 = []
x2 = []
xmu = []
for i in range(len(gmm.weights_)):
xmu.append(gmm.weights_[i] * w)
x1.append(gmm.means_[i])
x2.append(np.sqrt(gmm.covariances_[i]))
return np.array(xmu), np.array(x1), np.array(x2), gmm
def gmm_source(xs, ys, filepath, sourcename, slist=[], covtype='diag'):
if not os.path.exists(filepath):
ty = Counter(ys)
lys = len(ys)
lc = len(Counter(ys))
ws = {}
for i in range(lc):
ws[i] = ty[i] / lys
if len(slist) == 0:
slist = np.arange(1, lys + 1)
for i in range(lc):
xtmu, xt1, xt2, gmmt = gmm_source_class(xs[np.where(ys == i)[0]], ws[i], slist, covtype=covtype)
yts = np.ones(len(xt1)) * i
if i == 0:
xn1, xn2, xmu = xt1, xt2, xtmu
yns = yts
else:
xmu = np.hstack((xmu, xtmu))
xn1 = np.vstack((xn1, xt1))
xn2 = np.vstack((xn2, xt2))
yns = np.hstack((yns, yts))
data = {'xmu': xmu, 'xn1': xn1, 'xn2': xn2, 'yns': yns}
record = {sourcename: data}
with open(filepath, 'w') as f:
json.dump(record, f, cls=MyEncoder)
def entropic_partial_wasserstein(a, b, M, reg, m=1, numItermax=1000,
stopThr=1e-100, verbose=False, log=False):
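# Entropic-regularised partial optimal transport: moves a fraction m of the
# total mass between histograms a and b under cost matrix M via Sinkhorn-style
# scaling iterations (this appears to mirror
# ot.partial.entropic_partial_wasserstein from the POT library).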
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
dim_a, dim_b = M.shape
if len(a) == 0:
a = np.ones(dim_a, dtype=np.float64) / dim_a
if len(b) == 0:
b = np.ones(dim_b, dtype=np.float64) / dim_b
if m > np.min((np.sum(a), np.sum(b))):
# -*- coding: utf-8 -*-
"""
Created on 23/04/2020
Author : <NAME>
"""
from __future__ import print_function, division
import os
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from astropy.table import Table, vstack
import matplotlib.pyplot as plt
from matplotlib import cm
import pymc3 as pm
import theano.tensor as tt
import seaborn as sns
from tqdm import tqdm
import context
import bsf_old
class Spindex():
""" Linearly interpolated line-strength indices."""
def __init__(self, temptable, indnames, parnames):
self.table = temptable
self.indnames = indnames
self.parnames = parnames
self.nindices = len(indnames)
self.nparams = len(parnames)
# Interpolating models
pdata = np.array([temptable[col].data for col in parnames]).T
tdata = np.array([temptable[col].data for col in indnames]).T
nodes = []
for param in parnames:
x = np.unique(temptable[param]).data
nodes.append(x)
coords = np.meshgrid(*nodes, indexing='ij')
dim = coords[0].shape + (self.nindices,)
data = np.zeros(dim)
with np.nditer(coords[0], flags=['multi_index']) as it:
while not it.finished:
multi_idx = it.multi_index
x = np.array([coords[i][multi_idx] for i in range(len(coords))])
idx = (pdata == x).all(axis=1).nonzero()[0]
data[multi_idx] = tdata[idx]
it.iternext()
self.f = RegularGridInterpolator(nodes, data, fill_value=0)
########################################################################
# Get grid points to handle derivatives
inner_grid = []
thetamin = []
thetamax = []
for par in self.parnames:
thetamin.append(np.min(self.table[par].data))
thetamax.append(np.max(self.table[par].data))
inner_grid.append(np.unique(self.table[par].data)[1:-1])
self.thetamin = np.array(thetamin)
self.thetamax = np.array(thetamax)
self.inner_grid = inner_grid
def __call__(self, theta):
return self.f(theta)[0]
def gradient(self, theta, eps=1e-6):
# Clipping theta to avoid border problems
theta = np.maximum(theta, self.thetamin + 2 * eps)
theta = np.minimum(theta, self.thetamax - 2 * eps)
grads = np.zeros((self.nparams, self.nindices))
for i,t in enumerate(theta):
epsilon = np.zeros(self.nparams)
epsilon[i] = eps
# Check if data point is in inner grid
in_grid = t in self.inner_grid[i]
if in_grid:
tp1 = theta + 2 * epsilon
tm1 = theta + epsilon
grad1 = (self.__call__(tp1) - self.__call__(tm1)) / (2 * eps)
tp2 = theta - epsilon
tm2 = theta - 2 * epsilon
grad2 = (self.__call__(tp2) - self.__call__(tm2)) / (2 * eps)
grads[i] = 0.5 * (grad1 + grad2)
else:
tp = theta + epsilon
tm = theta - epsilon
grads[i] = (self.__call__(tp) - self.__call__(tm)) / (2 * eps)
return grads
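# Hedged usage sketch for Spindex (the column names below are illustrative,
# not taken from the document):
#
#   spindex = Spindex(temptable, indnames=['Hbeta', 'Mgb'], parnames=['age', 'Z'])
#   theta = np.array([10.0, 0.0])    # one point in parameter space
#   lick_model = spindex(theta)      # interpolated indices, length nindices
#   grads = spindex.gradient(theta)  # shape (nparams, nindices)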
def build_model(lick, lickerr, spindex, loglike="studt"):
model = pm.Model()
with model:
theta = []
for param in spindex.parnames:
vmin = spindex.table[param].min()
vmax = spindex.table[param].max()
v = pm.Uniform(param, lower=vmin, upper=vmax)
theta.append(v)
if loglike == "studt":
nu = pm.Uniform("nu", lower=2.01, upper=50, testval=10.)
theta.append(nu)
if loglike == "normal2":
x = pm.Normal("x", mu=0, sd=1)
s = pm.Deterministic("S", 1. + pm.math.exp(x))
theta.append(s)
theta = tt.as_tensor_variable(theta).T
logl = bsf_old.LogLike(lick, spindex.indnames, lickerr, spindex,
loglike=loglike)
pm.DensityDist('loglike', lambda v: logl(v),
observed={'v': theta})
return model
def run_bsf(lick, lickerr, spindex, db, loglike="studt", draws=500, redo=False):
"""Runs BSF on Lick indices. """
summary = "{}.csv".format(db)
if os.path.exists(summary) and not redo:
return
model = build_model(lick, lickerr, spindex, loglike=loglike)
with model:
trace = pm.sample(draws=draws, tune=draws)
df = pm.stats.summary(trace)
df.to_csv(summary)
pm.save_trace(trace, db, overwrite=True)
return
def load_traces(db, params):
if not os.path.exists(db):
return None
ntraces = len(os.listdir(db))
data = [np.load(os.path.join(db, _, "samples.npz")) for _ in
os.listdir(db)]
traces = []
for param in params:
v = np.vstack([data[num][param] for num in range(ntraces)]).flatten()
traces.append(v)
traces = np.column_stack(traces)
return traces
def make_table(trace, params, db, binnum, redo=False):
outtab = "{}_results.fits".format(db)
if os.path.exists(outtab) and not redo:
tab = Table.read(outtab)
return tab
v = np.percentile(trace, 50, axis=0)
vmax = np.percentile(trace, 84, axis=0)
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
import numpy as np
from HamiltonianModule import spin_operators
class QES_1D:
def __init__(self, d, chi, D, l_phys, tau, spin='half', operators=None):
# l_phys: number of sites in the bulk to simulate the bath
self.d = d
self.chi = chi # bath dimension
self.D = D
self.l_phys = l_phys
self.tau = tau
self.gate_phys = np.zeros(0)
self.tensors_gate_phys = [np.zeros(0), np.zeros(0)] # two tensors of the physical gates, by SVD or QR
self.gate_bath = [np.zeros(0), np.zeros(0)] # the two physical-bath Hamiltonians
self.hamilt_bath = [np.zeros(0), np.zeros(0)]
if operators is None:
op_half = spin_operators(spin)
self.operators = [op_half['id'], op_half['sx'], op_half['sy'], op_half['sz'],
op_half['su'], op_half['sd']]
else:
self.operators = operators
def obtain_physical_gate_tensors(self, hamilt):
"""
gate_phys: physical gate (or shifted physical Hamiltonian) is 4-th tensor
0 1
\ /
G
/ \
2 3
"""
self.gate_phys = np.eye(self.d ** 2) - self.tau * hamilt
self.gate_phys = self.gate_phys.reshape(self.d, self.d, self.d, self.d)
u, s, v = np.linalg.svd(self.gate_phys.transpose(0, 2, 1, 3).reshape(self.d**2, self.d**2))
s = np.diag(s ** 0.5)
self.tensors_gate_phys[0] = u.dot(s).reshape(self.d, self.d, self.d**2).transpose(0, 2, 1)
self.tensors_gate_phys[1] = s.dot(v).reshape(self.d**2, self.d, self.d).transpose(1, 0, 2)
def obtain_bath_h(self, env, which, way='shift'):
"""
h_bath is 4-th tensor
0 1
\ /
G
/ \
2 3
"""
if (which == 'left') or (which == 'both'):
self.gate_bath[0] = np.tensordot(env[0], self.tensors_gate_phys[1], ([1], [1]))
self.gate_bath[0] = self.gate_bath[0].transpose(0, 2, 1, 3)
s = self.gate_bath[0].shape
self.hamilt_bath[0] = self.gate_bath[0].reshape(s[0] * s[1], s[2] * s[3]).copy()
lm, u = np.linalg.eigh(self.hamilt_bath[0])
lm /= np.max(lm)
if way == 'shift':
self.hamilt_bath[0] = u.dot(np.diag((np.ones((
s[0] * s[1],)) - lm) / self.tau)).dot(u.T.conj())
else:
self.hamilt_bath[0] = u.dot(np.diag(-np.log(abs(lm))/self.tau)).dot(u.T.conj())
self.hamilt_bath[0] = self.hamilt_bath[0] - np.trace(self.hamilt_bath[0]) * np.eye(
s[0]*s[1]) / (s[0]*s[1])
self.hamilt_bath[0] = (self.hamilt_bath[0] + self.hamilt_bath[0].T.conj())/2
if (which == 'right') or (which == 'both'):
self.gate_bath[1] = np.tensordot(self.tensors_gate_phys[0], env[1], ([1], [1]))
self.gate_bath[1] = self.gate_bath[1].transpose(0, 2, 1, 3)
s = self.gate_bath[1].shape
self.hamilt_bath[1] = self.gate_bath[1].reshape(s[0] * s[1], s[2] * s[3]).copy()
lm, u = np.linalg.eigh(self.hamilt_bath[1])
lm /= np.max(lm)
if way == 'shift':
self.hamilt_bath[1] = u.dot(np.diag((np.ones((
s[0] * s[1],)) - lm) / self.tau)).dot(u.T.conj())
else:
self.hamilt_bath[1] = u.dot(np.diag(-np.log(abs(lm))/self.tau)).dot(u.T.conj())
self.hamilt_bath[1] = self.hamilt_bath[1] - np.trace(self.hamilt_bath[1]) * np.eye(
s[0] * s[1]) / (s[0] * s[1])
self.hamilt_bath[1] = (self.hamilt_bath[1] + self.hamilt_bath[1].T.conj()) / 2
def obtain_bath_h_by_effective_ops_1d(self, hb_onsite, op_effective, h_index):
self.hamilt_bath[0] = np.kron(hb_onsite, np.eye(self.d))
for n in range(h_index.shape[0]):
op1 = op_effective[int(h_index[n, 0])]
op2 = self.operators[int(h_index[n, 1])]
j = h_index[n, 2]
self.hamilt_bath[0] += j * np.kron(op1, op2)
##########################################
# File: visualise.py #
# Copyright <NAME> 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
from __future__ import print_function
import argparse
import json
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from uniform_bspline import UniformBSpline
# Colours
C = dict(b='#377EB8', r='#E41A1C', g='#4DAF4A', o='#FF7F00')
# generate_figure
def generate_figure(z, num_samples, empty=False, disable={}, verbose=True):
degree, num_control_points, dim, is_closed = (
z['degree'], z['num_control_points'], z['dim'], z['is_closed'])
if verbose:
print(' degree:', degree)
print(' num_control_points:', num_control_points)
print(' dim:', dim)
print(' is_closed:', is_closed)
c = UniformBSpline(degree, num_control_points, dim, is_closed=is_closed)
Y, w, u, X = [np.array(z[k]) for k in 'YwuX']
if verbose:
print(' num_data_points:', Y.shape[0])
kw = {}
if Y.shape[1] == 3:
kw['projection'] = '3d'
f = plt.figure()
if empty:
ax = f.add_axes((0, 0, 1, 1), **kw)
ax.set_xticks([])
ax.set_yticks([])
if Y.shape[1] == 3:
ax.set_zticks([])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
else:
ax = f.add_subplot(111, **kw)
ax.set_aspect('equal')
def plot(X, *args, **kwargs):
ax.plot(*(tuple(X.T) + args), **kwargs)
if 'Y' not in disable:
plot(Y, '.', c=C['r'])
if 'u' not in disable:
for m, y in zip(c.M(u, X), Y):
plot(np.r_['0,2', m, y], '-', c=C['o'])
if 'X' not in disable:
plot(X, 'o--', ms=6.0, c='k', mec='k')
if 'M' not in disable:
plot(c.M(c.uniform_parameterisation(num_samples), X), '-',
c=C['b'], lw=3.0)
if not empty:
e = z.get('e')
if e is not None:
ax.set_title('Energy: {:.7e}'.format(e))
return f
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_path')
parser.add_argument('output_path', nargs='?')
parser.add_argument('--num-samples', type=int, default=1024)
parser.add_argument('--width', type=float, default=6.0)
parser.add_argument('--height', type=float, default=4.0)
parser.add_argument('--dpi', type=int, default=100)
parser.add_argument('--empty', default=False, action='store_true')
parser.add_argument('-d', '--disable', action='append', default=[],
choices={'Y', 'u', 'M', 'X'})
args = parser.parse_args()
if not os.path.isdir(args.input_path):
print('Input:', args.input_path)
with open(args.input_path, 'r') as fp:
z = json.load(fp)
f = generate_figure(z, args.num_samples,
empty=args.empty, disable=args.disable)
if args.output_path is None:
plt.show()
else:
print('Output:', args.output_path)
f.set_size_inches((args.width, args.height))
f.savefig(args.output_path, dpi=args.dpi,
bbox_inches=0.0, pad_inches='tight')
else:
if args.output_path is None:
raise ValueError('`output_path` required')
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
# Load all input files to `states`.
input_files = sorted(os.listdir(args.input_path),
key=lambda f: int(os.path.splitext(f)[0]))
input_paths = [os.path.join(args.input_path, f) for f in input_files]
print('Input:')
states = []
for input_path in input_paths:
print(' ', input_path)
with open(input_path, 'r') as fp:
states.append(json.load(fp))
# Determine `xlim`, `ylim`, and (potentially) `zlim`.
bounds = sum([[(np.min(z[k], axis=0), np.max(z[k], axis=0))
for z in states]
for k in 'XY'], [])
min_, max_ = list(zip(*bounds))
min_, max_ = np.min(min_, axis=0), np.max(max_, axis=0)
import numpy as np
from MagniPy.lensdata import Data
import subprocess
import shutil
import scipy.ndimage.filters as sfilt
import itertools
from copy import deepcopy
def dr(x1,x2,y1,y2):
return np.sqrt((x1-x2)**2+(y1-y2)**2)
def snap_to_bins(data, xbin_centers, dx, ybin_centers, dy, ranges):
new_datax = deepcopy(data[:, 0])
new_datay = deepcopy(data[:, 1])
new_datax[np.where(new_datax <= ranges[0][0])] = xbin_centers[0]
new_datax[np.where(new_datax >= ranges[0][1])] = xbin_centers[-1]
new_datay[np.where(new_datay <= ranges[1][0])] = ybin_centers[0]
new_datay[np.where(new_datay >= ranges[1][1])] = ybin_centers[-1]
new_data = None
xx, yy = np.meshgrid(xbin_centers, ybin_centers)
coords = zip(np.round(xx.ravel(), 4), np.round(yy.ravel(), 4))
for i, (cenx, ceny) in enumerate(coords):
subx = np.absolute(new_datax - cenx) * dx ** -1
suby = np.absolute(new_datay - ceny) * dy ** -1
inds = np.where(np.logical_and(subx < 1, suby < 1))[0]
if len(inds) > 0:
new_array = np.column_stack((np.array([cenx] * len(inds)), np.array([ceny] * len(inds))))
if new_data is None:
new_data = deepcopy(new_array)
else:
new_data = np.vstack((new_data, new_array))
return new_data
def approx_theta_E(ximg,yimg):
dis = []
xinds,yinds = [0,0,0,1,1,2],[1,2,3,2,3,3]
for (i,j) in zip(xinds,yinds):
dx,dy = ximg[i] - ximg[j], yimg[i] - yimg[j]
dr = (dx**2+dy**2)**0.5
dis.append(dr)
dis = np.array(dis)
greatest = np.argmax(dis)
dr_greatest = dis[greatest]
dis[greatest] = 0
second_greatest = np.argmax(dis)
dr_second = dis[second_greatest]
return 0.5*(dr_greatest*dr_second)**0.5
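# Worked example: for a symmetric cross with images at (+-1, 0) and (0, +-1),
# the two largest pairwise separations are both 2, so approx_theta_E returns
# 0.5 * sqrt(2 * 2) = 1, i.e. the unit Einstein radius.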
def min_img_sep_ranked(ximg, yimg):
ximg, yimg = np.array(ximg), np.array(yimg)
d1 = dr(ximg[0], ximg[1:], yimg[0], yimg[1:])
d2 = dr(ximg[1], [ximg[0], ximg[2], ximg[3]], yimg[1],
[yimg[0], yimg[2], yimg[3]])
d3 = dr(ximg[2], [ximg[0], ximg[1], ximg[3]], yimg[2],
[yimg[0], yimg[1], yimg[3]])
d4 = dr(ximg[3], [ximg[0], ximg[1], ximg[2]], yimg[3],
[yimg[0], yimg[1], yimg[2]])
idx1 = np.argmin(d1)
idx2 = np.argmin(d2)
idx3 = np.argmin(d3)
idx4 = np.argmin(d4)
x_2, x_3, x_4 = [ximg[0], ximg[2], ximg[3]], [ximg[0], ximg[1], ximg[3]], [ximg[0], ximg[1], ximg[2]]
y_2, y_3, y_4 = [yimg[0], yimg[2], yimg[3]], [yimg[0], yimg[1], yimg[3]], [yimg[0], yimg[1], yimg[2]]
theta1 = np.arctan((yimg[1:][idx1] - yimg[0])/(ximg[1:][idx1] - ximg[0]))
theta2 = np.arctan((y_2[idx2] - yimg[1]) / (x_2[idx2] - ximg[1]))
theta3 = np.arctan((y_3[idx3] - yimg[2]) / (x_3[idx3] - ximg[2]))
theta4 = np.arctan((y_4[idx4] - yimg[3]) / (x_4[idx4] - ximg[3]))
return np.array([np.min(d1), np.min(d2), np.min(d3), np.min(d4)]), np.array([theta1, theta2,
theta3, theta4])
def min_img_sep(ximg,yimg):
assert len(ximg) == len(yimg)
dr = []
if len(ximg) == 1:
return 1
elif len(ximg) == 0:
return 1
try:
for i in range(0,int(len(ximg)-1)):
for j in range(i+1,int(len(ximg))):
dx = ximg[i] - ximg[j]
dy = yimg[i] - yimg[j]
dr.append((dx**2 + dy**2)**0.5)
return min(dr)
except:
print('problem with the fit...')
return 1
def sort_image_index(ximg,yimg,xref,yref):
assert len(xref) == len(ximg)
x_self = np.array(list(itertools.permutations(ximg)))
y_self = np.array(list(itertools.permutations(yimg)))
indexes = [0, 1, 2, 3]
index_iterations = list(itertools.permutations(indexes))
delta_r = []
for i in range(0, int(len(x_self))):
dr = 0
for j in range(0, int(len(x_self[0]))):
dr += (x_self[i][j] - xref[j]) ** 2 + (y_self[i][j] - yref[j]) ** 2
delta_r.append(dr ** .5)
min_indexes = np.array(index_iterations[np.argmin(delta_r)])
return min_indexes
def coordinates_inbox(box_dx,box_dy,centered_x,centered_y):
return np.logical_and(np.logical_and(-0.5*box_dx < centered_x, centered_x < 0.5*box_dx),
np.logical_and(-0.5*box_dy < centered_y, centered_y < 0.5*box_dy))
def confidence_interval(percentile,data):
data=np.array(data)
data.sort()
L = len(data)
counter = 0
while True:
value = data[counter]
if counter>=L*percentile:
break
counter+=1
return value
def quick_confidence(centers, heights, percentile):
total = np.sum(heights)
summ, index = 0, 0
while summ < total * percentile:
summ += heights[index]
index += 1
return centers[index-1]
def read_data(filename='',N=None):
with open(filename,'r') as f:
lines = f.readlines()
dsets = []
for line in lines:
line = line.split(' ')
n = int(line[0])
try:
srcx,srcy = float(line[1]),float(line[2])
except:
srcx,srcy = None,None
x1,x2,x3,x4,y1,y2,y3,y4 = float(line[3]),float(line[7]),float(line[11]),float(line[15]),float(line[4]),\
float(line[8]),float(line[12]),float(line[16])
m1,m2,m3,m4 = float(line[5]),float(line[9]),float(line[13]),float(line[17])
t1,t2,t3,t4 = float(line[6]),float(line[10]),float(line[14]),float(line[18])
dsets.append(Data(x=[x1,x2,x3,x4],y=[y1,y2,y3,y4],m=[m1,m2,m3,m4],
t=[t1,t2,t3,t4],source=[srcx,srcy]))
if N is not None and len(dsets)>=N:
break
return dsets
def write_fluxes(filename='',fluxes = [], mode='append',summed_in_quad=True):
if summed_in_quad:
fluxes = np.squeeze(fluxes)
with open(filename,'a') as f:
if isinstance(fluxes,float):
f.write(str(fluxes)+'\n')
else:
for val in fluxes:
f.write(str(val)+'\n')
return
fluxes = np.array(fluxes)
if mode == 'append':
m = 'a'
else:
m = 'w'
if fluxes.ndim == 1:
with open(filename, m) as f:
for val in fluxes:
f.write(str(val) + ' ')
f.write('\n')
else:
N = int(np.shape(fluxes)[0])
with open(filename,m) as f:
for n in range(0,N):
for val in fluxes[n,:]:
f.write(str(val)+' ')
f.write('\n')
def write_data(filename='',data_list=[],mode='append'):
def single_line(dset):
lines = ''
lines += str(dset.nimg)+' '+str(dset.srcx)+' '+str(dset.srcy)+' '
for i in range(0,int(dset.nimg)):
for value in [dset.x[i],dset.y[i],dset.m[i],dset.t[i]]:
if value is None:
lines += '0 '
else:
lines += str(value)+' '
return lines+'\n'
if mode=='append':
with open(filename,'a') as f:
for dataset in data_list:
f.write(single_line(dataset))
else:
with open(filename,'w') as f:
for dataset in data_list:
f.write(single_line(dataset))
def integrate_profile(profname,limit,inspheres=False,**kwargs):
if profname=='nfw':
rs=kwargs['rs']
ks=kwargs['ks']
n=limit*rs**-1
if inspheres:
rho0 = 86802621404*ks*rs**-1
n*=rs
r200 = kwargs['c']*rs
return 4*np.pi*rho0*rs**3*(np.log(1+r200*n**-1)- n*(n+r200)**-1)
else:
return 2*np.pi*rs**2*ks*(np.log(.25*n**2)+2*np.arctanh(np.sqrt(1-n**2))*(np.sqrt(1-n**2))**-1)
elif profname=='SIE':
b = kwargs['SIE_Rein']
return np.pi*limit*b
def rotate(xcoords,ycoords,angle):
return xcoords*np.cos(angle)+ycoords*np.sin(angle),-xcoords*np.sin(angle)+ycoords*np.cos(angle)
def img_sept(x,y):
return np.sort(np.array([dr(x[0],x[1],y[0],y[1]),dr(x[0],x[2],y[0],y[2]),dr(x[0],x[3],y[0],y[3]),
dr(x[1],x[2],y[1],y[2]),dr(x[1],x[3],y[1],y[3]),dr(x[2],x[3],y[2],y[3])]))
def identify(x,y,RE):
separations = img_sept(x,y)
if separations[0] > RE:
return 0
if separations[1] <= 1.15*RE:
return 2
elif separations[0] <= 0.85*RE:
return 1
else:
return 0
def read_dat_file(fname):
x_srcSIE, y_srcSIE = [], []
with open(fname, 'r') as f:
nextline = False
dosrc = False
doimg = False
count = 0
readcount = 0
for line in f:
row = line.split(" ")
row_split = list(filter(None, row))
if row_split[0] == 'alpha':
macromodel = row_split
continue
if row_split[0] == 'Source':
nextline = True
dosrc = True
src = []
continue
if nextline and dosrc:
for item in row:
try:
src.append(float(item))
except ValueError:
continue
x_srcSIE.append(src[0])
y_srcSIE.append(src[1])
nextline = False
dosrc = False
continue
if row_split[0] == 'images:\n':
nextline = True
doimg = True
count = 0
x, y, f, t = [], [], [], []
continue
if nextline and doimg:
count += 1
numbers = []
for item in row:
try:
numbers.append(float(item))
except ValueError:
continue
x.append(numbers[4])
y.append(numbers[5])
f.append(numbers[6])
t.append(numbers[7])
if int(count) == 4:
t = np.array(t)
if min(t) < 0:
t += -1 * min(t)
xpos = x
ypos = y
fr = np.array(f)
tdel = np.array(t)
return xpos, ypos, fr, t, macromodel, [x_srcSIE[0], y_srcSIE[0]]
def read_gravlens_out(fnames):
vector = []
if isinstance(fnames,list):
for fname in fnames:
with open(fname, 'r') as f:
lines = f.readlines()
f.close()
imgline = lines[1].split(' ')
numimg = int(imgline[1])
xpos, ypos, mag, tdelay = [], [], [], []
for i in range(0, numimg):
data = lines[2 + i].split(' ')
data = filter(None, data)
xpos.append(float(data[0]))
ypos.append(float(data[1]))
mag.append(np.absolute(float(data[2])))
tdelay.append(float(data[3]))
vector.append([np.array(xpos), np.array(ypos), np.array(mag), np.array(tdelay), numimg])
else:
with open(fnames, 'r') as f:
lines = f.readlines()
f.close()
imgline = lines[1].split(' ')
numimg = int(imgline[1])
xpos, ypos, mag, tdelay = [], [], [], []
for i in range(0, numimg):
data = lines[2 + i].split(' ')
data = filter(None, data)
xpos.append(float(data[0]))
ypos.append(float(data[1]))
mag.append(np.absolute(float(data[2])))
tdelay.append(float(data[3]))
vector.append([np.array(xpos), np.array(ypos), np.array(mag), np.array(tdelay), numimg])
return vector
def read_chain_out(fname, N=1):
nimg, srcx, srcy, x1, y1, m1, t1, x2, y2, m2, t2, x3, y3, m3, t3, x4, y4, m4, t4 = np.loadtxt(fname, unpack=True)
return nimg, [srcx, srcy], [x1, x2, x3, x4], [y1, y2, y3, y4], [m1, m2, m3, m4], [t1, t2, t3, t4]
def polar_to_cart(ellip, theta, polar_to_cart = True):
xcomp = ellip*np.cos(2*theta*np.pi*180**-1)
ycomp = ellip*np.sin(2*theta*np.pi*180**-1)
return xcomp,ycomp
def cart_to_polar(e1, e2, polar_to_cart = True):
if e1 == 0 and e2 == 0:
return 0,0
else:
return np.sqrt(e1**2+e2**2),0.5*np.arctan2(e2,e1)*180*np.pi**-1
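# Round-trip sanity check for the two conversions above (theta in degrees):
#
#   e1, e2 = polar_to_cart(0.3, 30.0)     # (0.15, ~0.2598)
#   ellip, theta = cart_to_polar(e1, e2)  # recovers (0.3, 30.0)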
def array2image(array, nx=0, ny=0):
"""
returns the information contained in a 1d array into an n*n 2d array (only works when lenght of array is n**2)
:param array: image values
:type array: array of size n**2
:returns: 2d array
:raises: AttributeError, KeyError
"""
if nx == 0 or ny == 0:
n = int(np.sqrt(len(array)))
if n**2 != len(array):
raise ValueError("lenght of input array given as %s is not square of integer number!" %(len(array)))
nx, ny = n, n
image = array.reshape(int(nx), int(ny))
return image
def image2array(image):
"""
returns the information contained in a 2d array into an n*n 1d array
:param array: image values
:type array: array of size (n,n)
:returns: 1d array
:raises: AttributeError, KeyError
"""
nx, ny = image.shape # find the size of the array
imgh = np.reshape(image, nx*ny) # change the shape to be 1d
return imgh
def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False):
"""
:param numPix: number of pixels per axis
:param deltapix: pixel size
:param subgrid_res: sub-pixel resolution (default=1)
:return: x, y position information in two 1d arrays
"""
numPix_eff = numPix*subgrid_res
deltapix_eff = deltapix/float(subgrid_res)
a = np.arange(numPix_eff)
matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
if left_lower is True:
x_grid = matrix[:, 0]*deltapix
y_grid = matrix[:, 1]*deltapix
else:
x_grid = (matrix[:, 0] - (numPix_eff-1)/2.)*deltapix_eff
y_grid = (matrix[:, 1] - (numPix_eff-1)/2.)*deltapix_eff
shift = (subgrid_res-1)/(2.*subgrid_res)*deltapix
return array2image(x_grid - shift), array2image(y_grid - shift)
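# Example: make_grid(3, 1.0) returns 3x3 coordinate images centred on the
# origin, so x[0] == [-1., 0., 1.] and y[:, 0] == [-1., 0., 1.].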
def filter_by_position(lens_components, x_filter=None, y_filter=None, mindis_front=0.5, mindis_back=0.3, log_masscut_low=7,
zmain=None, cosmology=None):
"""
:param xsub: sub x coords
:param ysub: sub y coords
:param x_filter: img x coords
:param y_filter: img y coords
:param mindis: max 2d distance
:return: filtered subhalos
"""
masscut_low = 10**log_masscut_low
keep_index = []
for index, deflector in enumerate(lens_components):
if not deflector.is_subhalo:
keep_index.append(index)
continue
if zmain >= deflector.redshift:
"""
for LOS halos; keep if it's rescaled position is near an image
"""
scale = np.ones_like(x_filter)
_mindis = mindis_front
else:
#zmain < deflector.redshift:
"""
for halos behind the main lens
"""
beta = cosmology.beta(deflector.redshift,zmain,cosmology.zsrc)
scale = np.ones_like(x_filter)*(1 - beta)
_mindis = mindis_back
#scale_mindis = 0.5
x, y = deflector.lenstronomy_args['center_x'], deflector.lenstronomy_args['center_y']
for i in range(0, len(x_filter)):
dr = ((x - x_filter[i]*scale[i]) ** 2 + (y - y_filter[i]*scale[i]) ** 2) ** .5
if dr <= _mindis or deflector.other_args['mass'] >= masscut_low:
keep_index.append(index)
break
newcomponents = [lens_components[i] for i in keep_index]
new_redshift_list = [lens_components[i].redshift for i in keep_index]
return newcomponents, new_redshift_list
def copy_directory(dirname,location):
shutil.copy(dirname,location)
def create_directory(dirname=''):
proc = subprocess.Popen(['mkdir', dirname])
proc.wait()
def delete_dir(dirname=''):
shutil.rmtree(dirname)
def rebin_image(image,factor):
if np.shape(image)[0]%factor != 0:
raise ValueError('size of image must be divisible by factor')
def rebin(a, shape):
sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]
return a.reshape(sh).mean(-1).mean(1)
size = int(np.shape(image)[0]*factor**-1)
return rebin(image,[size,size])
def convolve_image(image,kernel='Gaussian',scale=None):
if kernel == 'Gaussian':
grid = sfilt.gaussian_filter(image, scale * (2.355) ** -1, mode='constant', cval=0)
elif kernel == 'HST':
grid = sfilt.gaussian_filter(image, scale * (2.355) ** -1, mode='constant', cval=0)
return grid
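# nfw_kr evaluates the dimensionless radial part of the projected NFW
# convergence, kappa(x) proportional to 2*(1 - F(x)) / (x**2 - 1), with
# F(x) = arctanh(sqrt(1 - x**2)) / sqrt(1 - x**2) for x < 1,
# F(x) = arctan(sqrt(x**2 - 1)) / sqrt(x**2 - 1) for x > 1, and F(1) = 1
# (cf. Wright & Brainerd 2000).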
def nfw_kr(X):
def f(x):
if isinstance(x, int) or isinstance(x, float):
if x > 1:
return np.arctan((x ** 2 - 1) ** .5) * (x ** 2 - 1) ** -.5
elif x < 1:
return np.arctanh((1 - x ** 2) ** .5) * (1 - x ** 2) ** -.5
else:
return 1
else:
inds1 = np.where(x < 1)
inds2 = np.where(x > 1)
vals = np.ones_like(x)
flow = (1 - x[inds1] ** 2) ** .5
fhigh = (x[inds2] ** 2 - 1) ** .5
vals[inds1] = np.arctanh(flow) * flow ** -1
vals[inds2] = np.arctan(fhigh) * fhigh ** -1
return vals
return 2 * (1 - f(X)) * (X ** 2 - 1) ** -1
def ellipse_coordinates(coordinatesx,coordinatesy,rmax,q=0.08,theta=None):
a = rmax
b = q*rmax
x,y = rotate(coordinatesx,coordinatesy,theta)
ellipse = (x*a**-1) ** 2 + (y*b**-1) ** 2 <= 1
#return x[ellipse],y[ellipse]
return ellipse
def phi_q2_ellipticity(phi, q):
"""
:param phi:
:param q:
:return:
"""
e1 = (1.-q)/(1.+q)*np.cos(2*phi)
e2 = (1.-q)/(1.+q)*np.sin(2*phi)
return e1, e2
def ellipticity2phi_q(e1, e2):
"""
:param e1:
:param e2:
:return:
"""
phi = np.arctan2(e2, e1)/2
c = np.sqrt(e1**2+e2**2)
if c > 0.999:
c = 0.999
q = (1-c)/(1+c)
return phi, q
def find_closest_xy(ximg,yimg,xref,yref):
dx = np.array(ximg) - xref
dy = np.array(yimg) - yref
dr = np.sqrt(dx**2+dy**2)
return np.argsort(dr)
def chi_square_img(data1x,data1y,data2x,data2y,sigma,reorder=False):
if len(data1x) != len(data2x):
return 10**10
if reorder:
inds = sort_image_index(data1x,data1y,data2x,data2y)
data1x = data1x[inds]
data1y = data1y[inds]
return np.sum(((data1x - data2x)*sigma**-1)**2 + ((data1y - data2y)*sigma**-1)**2)
def chi_square(d1,d2,sigma):
return np.sum(((d1 - d2)*sigma**-1)**2)
def flux_at_edge(image):
maxbright = np.max(image)
import math
import numpy as np
from .fixtures import *
from ..common.representation.encoding import *
def test_boundaries_creation_int(min_bound_int, max_bound_int):
b = Boundaries(min_bound_int, max_bound_int, type=np.int)
assert len(b) == 3
assert len(b[0]) == NB_ATTRIBUTES
assert len(b[1]) == NB_ATTRIBUTES
assert len(b[2]) == NB_ATTRIBUTES
assert np.all(b[0] == MIN_VAL_INT)
assert np.all(b[1] == MAX_VAL_INT)
assert np.all(b[2] == MAX_VAL_INT - MIN_VAL_INT)
import os
import time
import torch
import numpy as np
def env_factory(path, verbose=False, **kwargs):
"""
Returns an *uninstantiated* environment constructor.
Since environments containing cpointers (e.g. Mujoco envs) can't be serialized,
this allows us to pass their constructors to Ray remote functions instead
(since the gym registry isn't shared across ray subprocesses we can't simply
pass gym.make() either)
Note: env.unwrapped.spec is never set, if that matters for some reason.
"""
from functools import partial
if 'digit' in path.lower():
if not os.path.isdir('digit'):
print("You appear to be missing a './digit' directory.")
print("You can clone the cassie environment repository with:")
print("git clone https://github.com/siekmanj/digit")
exit(1)
from digit.digit import DigitEnv
path = path.lower()
if 'random_dynamics' in path or 'dynamics_random' in path or 'randomdynamics' in path or 'dynamicsrandom' in path:
dynamics_randomization = True
else:
dynamics_randomization = False
if 'impedance' in path:
impedance = True
else:
impedance = False
if 'standing' in path:
standing = True
else:
standing = False
if 'footpos' in path:
footpos = True
else:
footpos = False
if 'perception' in path:
perception = True
else:
perception = False
if 'stairs' in path:
stairs = True
else:
stairs = False
if 'hop_only' in path:
hop_only = True
else:
hop_only = False
if 'walk_only' in path:
walk_only = True
else:
walk_only = False
if 'height' in path:
height = True
else:
height = False
return partial(DigitEnv, dynamics_randomization=dynamics_randomization, impedance=impedance, standing=standing, footpos=footpos, perception=perception, stairs=stairs, hop_only=hop_only, walk_only=walk_only, height=height)
if 'cassie' in path.lower():
if not os.path.isdir('cassie'):
print("You appear to be missing a './cassie' directory.")
print("You can clone the cassie environment repository with:")
print("git clone https://github.com/siekmanj/cassie")
exit(1)
from cassie.cassie import CassieEnv_v2
path = path.lower()
if 'random_dynamics' in path or 'dynamics_random' in path or 'randomdynamics' in path or 'dynamicsrandom' in path:
dynamics_randomization = True
else:
dynamics_randomization = False
if 'nodelta' in path or 'no_delta' in path:
no_delta = True
else:
no_delta = False
no_delta = True
if 'stateest' in path or 'state_est' in path:
state_est = True
else:
state_est = False
state_est = True
if 'clock_based' in path or 'clockbased' in path:
clock = True
else:
clock = False
if 'statehistory' in path or 'state_history' in path:
history=1
else:
history=0
if 'legacy' in path:
legacy = True
else:
legacy = False
legacy = False
if 'impedance' in path:
impedance = True
else:
impedance = False
if 'height' in path:
height = True
else:
height = False
if verbose:
print("Created cassie env with arguments:")
print("\tdynamics randomization: {}".format(dynamics_randomization))
print("\tstate estimation: {}".format(state_est))
print("\tno delta: {}".format(no_delta))
print("\tclock based: {}".format(clock))
print("\timpedance control: {}".format(impedance))
print("\theight control: {}".format(height))
return partial(CassieEnv_v2, 'walking', clock=clock, state_est=state_est, no_delta=no_delta, dynamics_randomization=dynamics_randomization, history=history, legacy=legacy, impedance=impedance, height=height)
import gym
spec = gym.envs.registry.spec(path)
_kwargs = spec._kwargs.copy()
_kwargs.update(kwargs)
try:
if callable(spec._entry_point):
cls = spec._entry_point(**_kwargs)
else:
cls = gym.envs.registration.load(spec._entry_point)
except AttributeError:
if callable(spec.entry_point):
cls = spec.entry_point(**_kwargs)
else:
cls = gym.envs.registration.load(spec.entry_point)
return partial(cls, **_kwargs)
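# Typical usage (the environment string is illustrative):
#
#   env_fn = env_factory('cassie-stateest-clockbased-dynamicsrandom')
#   env = env_fn()  # instantiate lazily, e.g. inside a Ray worker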
def eval_policy(policy, min_timesteps=1000, max_traj_len=1000, visualize=True, env=None, verbose=True):
env_name = env
with torch.no_grad():
if env_name is None:
env = env_factory(policy.env_name)()
else:
env = env_factory(env_name)()
if verbose:
print("Policy is a: {}".format(policy.__class__.__name__))
reward_sum = 0
env.dynamics_randomization = False
total_t = 0
episodes = 0
obs_states = {}
mem_states = {}
while total_t < min_timesteps:
state = env.reset()
done = False
timesteps = 0
eval_reward = 0
episodes += 1
if hasattr(policy, 'init_hidden_state'):
policy.init_hidden_state()
#speeds = [(0, 0), (0.5, 0), (2.0, 0)]
speeds = list(zip(np.array(range(0, 350)) / 100, np.zeros(350)))
#!/usr/bin/python
#
# Inline, Crossline, True dip, Dip Azimuth or Coherency using the 3D complex trace envelope weighted phase structure tensor
# Derivatives calculated using Kroon's 3 point filter
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
xa.params = {
'Inputs': ['Input'],
'Output': ['Crl_dip', 'Inl_dip', 'True Dip', 'Dip Azimuth', 'Cplane'],
'ZSampMargin' : {'Value':[-15,15], 'Minimum':[-2,2], 'Symmetric': True},
'StepOut' : {'Value': [2,2], 'Minimum': [2,2], 'Symmetric': True},
'Par_0' : {'Name': 'Tensor ZStepOut', 'Value': 1},
'Par_1' : {'Name': 'Band', 'Value': 0.9},
'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/external_attributes/DipandAzimuth.html'
}
#
# Define the compute function
#
def doCompute():
xs = xa.SI['nrinl']
ys = xa.SI['nrcrl']
zs = min(2*int(xa.params['Par_0']['Value'])+1,3)
kernel = xl.getGaussian(xs-2, ys-2, zs-2)
hxs = xs//2
hys = ys//2
inlFactor = xa.SI['zstep']/xa.SI['inldist'] * xa.SI['dipFactor']
crlFactor = xa.SI['zstep']/xa.SI['crldist'] * xa.SI['dipFactor']
N = xa.params['ZSampMargin']['Value'][1]
band = xa.params['Par_1']['Value']
hilbkernel = xl.hilbert_kernel(N, band)
while True:
xa.doInput()
indata = xa.Input['Input']
#
# Analytic Signal
#
ansig = np.apply_along_axis(np.convolve,-1, indata, hilbkernel, mode="same")
sr = np.real(ansig)
si = np.imag(ansig)
#
# Compute partial derivatives
#
sx = xl.kroon3( sr, axis=0 )
sy = xl.kroon3( sr, axis=1 )
sz = xl.kroon3( sr, axis=2 )
shx = xl.kroon3( si, axis=0 )
shy = xl.kroon3( si, axis=1 )
shz = xl.kroon3( si, axis=2 )
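#
# Envelope-weighted phase gradients: for an analytic signal s = sr + i*si,
# the instantaneous-phase gradient is (sr*d(si) - si*d(sr)) / (sr^2 + si^2);
# the expressions below keep only the numerator, i.e. the phase gradient
# weighted by the squared envelope (hence "envelope weighted" in the header).
#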
px = sr[1:xs-1,1:ys-1,:] * shx[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sx[1:xs-1,1:ys-1,:]
py = sr[1:xs-1,1:ys-1,:] * shy[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sy[1:xs-1,1:ys-1,:]
pz = sr[1:xs-1,1:ys-1,:] * shz[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sz[1:xs-1,1:ys-1,:]
#
# Inner product of gradients
px2 = px * px
py2 = py * py
pz2 = pz * pz
pxpy = px * py
pxpz = px * pz
pypz = py * pz
#
# Outer smoothing
rgx2 = xl.sconvolve(px2, kernel)
rgy2 = xl.sconvolve(py2, kernel)
rgz2 = xl.sconvolve(pz2, kernel)
rgxgy = xl.sconvolve(pxpy, kernel)
rgxgz = xl.sconvolve(pxpz, kernel)
rgygz = xl.sconvolve(pypz, kernel)
#
# Form the structure tensor
T = np.rollaxis(np.array([ [rgx2, rgxgy, rgxgz],
[rgxgy, rgy2, rgygz],
[rgxgz, rgygz, rgz2 ]]), 2)
#
# Get the eigenvalues and eigenvectors and calculate the dips
evals, evecs = np.linalg.eigh(T)
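#
# The script is truncated here. The remaining steps would use the dominant
# eigenvector (last column from eigh, i.e. largest eigenvalue) as the local
# reflector normal. A minimal sketch, assuming the usual convention of this
# plugin family (scale by inlFactor/crlFactor and write to xa.Output):
#
# ndx = evecs[:,0,-1]; ndy = evecs[:,1,-1]; ndz = evecs[:,2,-1]
# xa.Output['Crl_dip'] = -ndy/ndz * crlFactor
# xa.Output['Inl_dip'] = -ndx/ndz * inlFactor
# xa.doOutput()
#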
from __future__ import division
import glob
import numpy as NP
from functools import reduce
import numpy.ma as MA
import progressbar as PGB
import h5py
import healpy as HP
import warnings
import copy
import astropy.cosmology as CP
from astropy.time import Time, TimeDelta
from astropy.io import fits
from astropy import units as U
from astropy import constants as FCNST
from scipy import interpolate
from astroutils import DSP_modules as DSP
from astroutils import constants as CNST
from astroutils import nonmathops as NMO
from astroutils import mathops as OPS
from astroutils import lookup_operations as LKP
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import delay_spectrum as DS
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
prisim_path = prisim.__path__[0]+'/'
cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology
cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc
################################################################################
def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix,
triads=None, bltriplet=None,
hdf5file_prefix=None, infmt='npz',
datakey='noisy', blltol=0.1):
"""
----------------------------------------------------------------------------
Write closure phases computed in a PRISim simulation to a NPZ file with
appropriate format for further analysis.
Inputs:
infile_prefix
[string] HDF5 file or NPZ file created by a PRISim simulation or
its replication respectively. If infmt is specified as 'hdf5',
then hdf5file_prefix will be ignored and all the observing
info will be read from here. If infmt is specified as 'npz',
then hdf5file_prefix needs to be specified in order to read the
observing parameters.
triads [list or numpy array or None] Antenna triads given as a list of
3-element lists or a ntriads x 3 array. Each element in the
inner list is an antenna label. They will be converted to
strings internally. If set to None, then all triads determined
by bltriplet will be used. If specified, then inputs in blltol
and bltriplet will be ignored.
bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline
vectors. The first axis denotes the three baselines, the second
axis denotes the East, North, Up coordinates of the baseline
vector. Units are in m. Will be used only if triads is set to
None.
outfile_prefix
[string] Prefix of the NPZ file. It will be appended by
'_noiseless', '_noisy', and '_noise' and further by extension
'.npz'
infmt [string] Format of the input file containing visibilities.
Accepted values are 'npz' (default), and 'hdf5'. If infmt is
specified as 'npz', then hdf5file_prefix also needs to be
specified for reading the observing parameters
datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or
'noise' -- visibilities are to be written to the output. If set
to None, and infmt is 'hdf5', then all three sets of
visibilities are written. The datakey string will also be added
as a suffix in the output file.
blltol [scalar] Baseline length tolerance (in m) for matching baseline
vectors in triads. It must be a scalar. Default = 0.1 m. Will
be used only if triads is set to None and bltriplet is to be
used.
----------------------------------------------------------------------------
"""
if not isinstance(infile_prefix, str):
raise TypeError('Input infile_prefix must be a string')
if not isinstance(outfile_prefix, str):
raise TypeError('Input outfile_prefix must be a string')
if (triads is None) and (bltriplet is None):
raise ValueError('One of triads or bltriplet must be set')
if triads is None:
if not isinstance(bltriplet, NP.ndarray):
raise TypeError('Input bltriplet must be a numpy array')
if not isinstance(blltol, (int,float)):
raise TypeError('Input blltol must be a scalar')
if bltriplet.ndim != 2:
raise ValueError('Input bltriplet must be a 2D numpy array')
if bltriplet.shape[0] != 3:
raise ValueError('Input bltriplet must contain three baseline vectors')
if bltriplet.shape[1] != 3:
raise ValueError('Input bltriplet must contain baseline vectors along three coordinates in the ENU frame')
else:
if not isinstance(triads, (list, NP.ndarray)):
raise TypeError('Input triads must be a list or numpy array')
triads = NP.asarray(triads).astype(str)
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input file format must be npz or hdf5')
if infmt.lower() == 'npz':
if not isinstance(hdf5file_prefix, str):
raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information')
if datakey is None:
datakey = ['noisy']
if isinstance(datakey, str):
datakey = [datakey]
elif not isinstance(datakey, list):
raise TypeError('Input datakey must be a list')
for dkey in datakey:
if dkey.lower() not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input found in datakey')
if infmt.lower() == 'hdf5':
fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower())
fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension]
else:
fullfnames_without_extension = [infile_prefix]
if len(fullfnames_without_extension) == 0:
raise IOError('No input files found with pattern {0}'.format(infile_prefix))
try:
if infmt.lower() == 'hdf5':
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0])
else:
simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix)
except:
raise IOError('Input PRISim file does not contain a valid PRISim output')
latitude = simvis.latitude
longitude = simvis.longitude
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day
last = last.reshape(-1,1)
daydata = NP.asarray(simvis.timestamp[0]).ravel()
if infmt.lower() == 'npz':
simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower())
skyvis = simvisinfo['noiseless'][0,...]
vis = simvisinfo['noisy']
noise = simvisinfo['noise']
n_realize = vis.shape[0]
else:
n_realize = len(fullfnames_without_extension)
cpdata = {}
outfile = {}
for fileind in range(n_realize):
if infmt.lower() == 'npz':
simvis.vis_freq = vis[fileind,...]
simvis.vis_noise_freq = noise[fileind,...]
else:
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind])
if fileind == 0:
if triads is None:
triads, bltriplets = simvis.getThreePointCombinations(unique=False)
# triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3)
# bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets'])
triads = NP.asarray(triads).reshape(-1,3)
bltriplets = NP.asarray(bltriplets)
blinds = []
matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
revind = []
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
revind += [blnum]
if len(revind) > 0:
flip_factor = NP.ones(3, dtype=NP.float)
flip_factor[NP.array(revind)] = -1
rev_bltriplet = bltriplet * flip_factor.reshape(-1,1)
matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
raise ValueError('Some baselines in the triplet are not found in the model triads')
triadinds = []
for blnum in NP.arange(bltriplet.shape[0]):
triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1]))
triadinds += [triadind]
triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2]))
if triadind_intersection.size == 0:
raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.')
triads = triads[triadind_intersection,:]
selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3)
prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(),
delay_filter_info=None,
specsmooth_info=None,
spectral_window_info=None,
unique=False)
if fileind == 0:
triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to account for any order flips)
for outkey in datakey:
if fileind == 0:
outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey)
if outkey == 'noiseless':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0)
if outkey == 'noisy':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0)
if outkey == 'noise':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:]
cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0)
for outkey in datakey:
cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0)
flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool)
NP.savez_compressed(outfile[outkey], closures=cpdata[outkey],
flags=flagsdata, triads=triads,
last=last+NP.zeros((1,n_realize)),
days=daydata+NP.arange(n_realize))
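# Illustrative usage sketch (not part of the original module; the file prefixes
# are hypothetical placeholders, and the baseline triplet is an example
# equilateral 14.6 m triad given in ENU coordinates):
#
# bltriplet = NP.asarray([[14.6, 0.0, 0.0], [-7.3, 12.6, 0.0], [-7.3, -12.6, 0.0]])
# write_PRISim_bispectrum_phase_to_npz('/path/to/prisim_sim', '/path/to/closures',
#                                      bltriplet=bltriplet, infmt='hdf5',
#                                      datakey='noisy', blltol=0.1)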
################################################################################
def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
return a dictionary
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
Output:
cpinfo [dictionary] Contains one top level key, namely, 'raw'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan), and some other optional keys
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
cpinfo = {}
datapool = ['raw']
for dpool in datapool:
cpinfo[dpool] = {}
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
if qty == 'cphase':
cpinfo[dpool][qty] = NP.copy(cp)
elif qty == 'triads':
cpinfo[dpool][qty] = NP.copy(triadsdata)
elif qty == 'flags':
cpinfo[dpool][qty] = NP.copy(flags)
elif qty == 'lst':
cpinfo[dpool][qty] = NP.copy(lstHA)
elif qty == 'lst-day':
cpinfo[dpool][qty] = NP.copy(lstday.jd)
elif qty == 'days':
cpinfo[dpool][qty] = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
cpinfo[dpool][qty] = npzdata['averaged_closures'].astype(NP.float64)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
cpinfo[dpool][qty] = npzdata['std_dev_triad'].astype(NP.float64)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
cpinfo[dpool][qty] = npzdata['std_dev_lst'].astype(NP.float64)
return cpinfo
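# Illustrative usage sketch (hypothetical file path and site coordinates):
#
# cpinfo = loadnpz('/path/to/closures_noisy.npz', longitude=21.4278,
#                  latitude=-30.7224, lst_format='fracday')
# print(cpinfo['raw']['cphase'].shape)   # (nlst, ndays, ntriads, nchan)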
################################################################################
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
save it to HDF5 format
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
hdf5file [string] Output HDF5 file including full path.
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
if 'averaged_closures' in npzdata:
day_avg_cpdata = npzdata['averaged_closures']
cp_dayavg = day_avg_cpdata.astype(NP.float64)
if 'std_dev_triad' in npzdata:
std_triads_cpdata = npzdata['std_dev_triad']
cp_std_triads = std_triads_cpdata.astype(NP.float64)
if 'std_dev_lst' in npzdata:
std_lst_cpdata = npzdata['std_dev_lst']
cp_std_lst = std_lst_cpdata.astype(NP.float64)
with h5py.File(hdf5file, 'w') as fobj:
datapool = ['raw']
for dpool in datapool:
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
data = None
if qty == 'cphase':
data = NP.copy(cp)
elif qty == 'triads':
data = NP.copy(triadsdata)
elif qty == 'flags':
data = NP.copy(flags)
elif qty == 'lst':
data = NP.copy(lstHA)
elif qty == 'lst-day':
data = NP.copy(lstday.jd)
elif qty == 'days':
data = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
data = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
data = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
data = NP.copy(cp_std_lst)
if data is not None:
dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9)
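# Illustrative usage sketch (hypothetical paths; mirrors loadnpz() but writes
# the contents into the 'raw/' group of an HDF5 file instead of returning a
# dictionary):
#
# npz2hdf5('/path/to/closures_noisy.npz', '/path/to/closures_noisy.hdf5',
#          longitude=21.4278, latitude=-30.7224, lst_format='fracday')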
################################################################################
def save_CPhase_cross_power_spectrum(xcpdps, outfile):
"""
----------------------------------------------------------------------------
Save cross-power spectrum information in a dictionary to a HDF5 file
Inputs:
xcpdps [dictionary] This dictionary is essentially an output of the
member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
outfile [string] Full path to the external HDF5 file where the cross-
power spectrum information provided in xcpdps will be saved
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
with h5py.File(outfile, 'w') as fileobj:
hdrgrp = fileobj.create_group('header')
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
dset = hdrgrp.create_dataset(key, data=xcpdps[key])
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
smplgrp = fileobj.create_group(smplng)
for key in sampling_keys:
dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
dpoolgrp = smplgrp.create_group(dpool)
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][key], dict):
subgrp = dpoolgrp.create_group(key)
for subkey in xcpdps[smplng][dpool][key]:
dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
else:
dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][stat], list):
for ii in range(len(xcpdps[smplng][dpool][stat])):
dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
else:
dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)
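# Illustrative usage sketch (xcpdps here denotes an output of the member
# function compute_power_spectrum() of class ClosurePhaseDelaySpectrum; the
# path is a hypothetical placeholder):
#
# save_CPhase_cross_power_spectrum(xcpdps, '/path/to/xcpdps.hdf5')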
################################################################################
def read_CPhase_cross_power_spectrum(infile):
"""
----------------------------------------------------------------------------
Read information about cross power spectrum from an external HDF5 file into
a dictionary. This is the counterpart to save_CPhase_cross_power_spectrum()
Input:
infile [string] Full path to the external HDF5 file that contains info
about cross-power spectrum.
Output:
xcpdps [dictionary] This dictionary has structure the same as output
of the member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
xcpdps = {}
with h5py.File(infile, 'r') as fileobj:
hdrgrp = fileobj['header']
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
xcpdps[key] = hdrgrp[key].value
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in fileobj:
smplgrp = fileobj[smplng]
xcpdps[smplng] = {}
for key in sampling_keys:
xcpdps[smplng][key] = smplgrp[key].value
for dpool in dpool_keys:
if dpool in smplgrp:
xcpdps[smplng][dpool] = {}
dpoolgrp = smplgrp[dpool]
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in dpoolgrp:
if isinstance(dpoolgrp[key], h5py.Group):
xcpdps[smplng][dpool][key] = {}
for subkey in dpoolgrp[key]:
xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey].value
elif isinstance(dpoolgrp[key], h5py.Dataset):
xcpdps[smplng][dpool][key] = dpoolgrp[key].value
else:
raise TypeError('Invalid h5py data type encountered')
for stat in ['mean', 'median']:
if stat in dpoolgrp:
if isinstance(dpoolgrp[stat], h5py.Dataset):
valunits = dpoolgrp[stat].attrs['units']
xcpdps[smplng][dpool][stat] = dpoolgrp[stat].value * U.Unit(valunits)
elif isinstance(dpoolgrp[stat], h5py.Group):
xcpdps[smplng][dpool][stat] = []
for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].value * U.Unit(valunits)]
return xcpdps
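# Illustrative round-trip sketch (hypothetical path; the 'mean'/'median' values
# come back as astropy Quantities because units are restored from the HDF5
# dataset attributes):
#
# xcpdps = read_CPhase_cross_power_spectrum('/path/to/xcpdps.hdf5')
# for smplng in ['oversampled', 'resampled']:
#     if smplng in xcpdps:
#         print(smplng, xcpdps[smplng]['z'])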
################################################################################
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None):
"""
----------------------------------------------------------------------------
Perform incoherent averaging of cross power spectrum along specified axes
Inputs:
xcpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information possibly coming from different sources, and they
will be averaged incoherently. If a single
dictionary is provided instead of a list of dictionaries, the
said averaging does not take place. Each dictionary is
essentially an output of the member function
compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It
has the following key-value structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and
'residual' each of which is a dictionary. 'whole' contains power
spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained
as a difference between 'whole' and 'submodel'. It contains the
following keys and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
excpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information of subsample differences possibly coming from
different sources, and they will be averaged
incoherently. This is optional. If not set (default=None), no
incoherent averaging happens. If a single dictionary is provided
instead of a list of dictionaries, the said averaging does not
take place. Each dictionary is essentially an output of the
member function compute_power_spectrum_uncertainty() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
array), 'dday' ((ndaycomb,) array), 'oversampled' and
'resampled' corresponding to whether resample was set to False
or True in call to member function FT(). Values under keys
'triads_ind' and 'lst_ind' are numpy array corresponding to
triad and time indices used in selecting the data. Values under
keys 'oversampled' and 'resampled' each contain a dictionary
with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of
the frequency subbands of the subband delay spectra. It
is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform.
It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary.
It contains information about power spectrum uncertainties
obtained from subsample differences. It contains the following
keys and values:
'mean' [numpy array] Delay power spectrum uncertainties
incoherently estimated over the axes specified in
xinfo['axes'] using the 'mean' key in input cpds or
attribute cPhaseDS['errinfo']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties
incoherently averaged over the axes specified in incohax
using the 'median' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends
on the combination of input parameters. See examples
below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets
for those axes. If 'avgcov' was set, those entries will
be removed from 'diagoffsets' since all the leading
diagonal elements have been collapsed (averaged) further.
Value under each key is a numpy array where each element
in the array corresponds to the index of that leading
diagonal. This should match the size of the output along
that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
diagoffsets [NoneType or dictionary or list of dictionaries] This info is
used for incoherent averaging along specified diagonals along
specified axes. This incoherent averaging is performed after
incoherently averaging multiple cross-power spectra (if any).
If set to None, this incoherent averaging is not performed.
Many combinations of axes and diagonals can be specified as
individual dictionaries in a list. If only one dictionary is
specified, then it assumed that only one combination of axes
and diagonals is requested. If a list of dictionaries is given,
each dictionary in the list specifies a different combination
for incoherent averaging. Each dictionary should have the
following key-value pairs. The key is the axis number (allowed
values are 1, 2, 3) that denote the axis type (1=LST, 2=Days,
3=Triads to be averaged), and the value under these keys is a
list or numpy array of diagonals to be averaged incoherently.
These axes-diagonal combinations apply to both the inputs
xcpdps and excpdps, except axis=2 does not apply to excpdps
(since it is made of subsample differences already) and will be
skipped.
Outputs:
A tuple consisting of two dictionaries. The first dictionary contains the
incoherent averaging of xcpdps as specified by the inputs, while the second
consists of incoherent of excpdps as specified by the inputs. The structure
of these dictionaries are practically the same as the dictionary inputs
xcpdps and excpdps respectively. The only differences in dictionary
structure are:
* Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
/'errinfo']['mean'/'median'] is a list of numpy arrays, where each
array in the list corresponds to the dictionary in the list in input
diagoffsets that defines the axes-diagonal combination.
----------------------------------------------------------------------------
"""
if isinstance(xcpdps, dict):
xcpdps = [xcpdps]
if not isinstance(xcpdps, list):
raise TypeError('Invalid data type provided for input xcpdps')
if excpdps is not None:
if isinstance(excpdps, dict):
excpdps = [excpdps]
if not isinstance(excpdps, list):
raise TypeError('Invalid data type provided for input excpdps')
if len(xcpdps) != len(excpdps):
raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')
out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
out_excpdps = None
if excpdps is not None:
out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}
for smplng in ['oversampled', 'resampled']:
if smplng in xcpdps[0]:
out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
if excpdps is not None:
out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}
for dpool in ['whole', 'submodel', 'residual']:
if dpool in xcpdps[0][smplng]:
out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in xcpdps[0][smplng][dpool]:
out_xcpdps[smplng][dpool][stat] = {}
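# Collect the statistic and its diagonal weights from every input power
# spectrum, broadcast the per-axis weights to the full array shape via
# axesmap, then average incoherently across inputs as a weighted nansum.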
arr = []
diagweights = []
for i in range(len(xcpdps)):
arr += [xcpdps[i][smplng][dpool][stat].si.value]
arr_units = xcpdps[i][smplng][dpool][stat].si.unit
if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in xcpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_xcpdps[smplng][dpool][stat] = arr
out_xcpdps[smplng][dpool]['diagweights'] = diagweights
for dpool in ['errinfo']:
if dpool in excpdps[0][smplng]:
out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in excpdps[0][smplng][dpool]:
out_excpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(excpdps)):
arr += [excpdps[i][smplng][dpool][stat].si.value]
arr_units = excpdps[i][smplng][dpool][stat].si.unit
if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in excpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_excpdps[smplng][dpool][stat] = arr
out_excpdps[smplng][dpool]['diagweights'] = diagweights
if diagoffsets is not None:
if isinstance(diagoffsets, dict):
diagoffsets = [diagoffsets]
if not isinstance(diagoffsets, list):
raise TypeError('Input diagoffsets must be a list of dictionaries')
for ind in range(len(diagoffsets)):
for ax in diagoffsets[ind]:
if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])
for smplng in ['oversampled', 'resampled']:
if smplng in out_xcpdps:
for dpool in ['whole', 'submodel', 'residual']:
if dpool in out_xcpdps[smplng]:
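# Build one boolean mask per requested axes-diagonal combination:
# entries stay unmasked (False) only at the selected diagonal offsets
# along every axis in that combination, so the masked-array averaging
# below is restricted to exactly those diagonals.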
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
out_xcpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_xcpdps[smplng][dpool]:
arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
arr_units = out_xcpdps[smplng][dpool][stat].si.unit
out_xcpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
if excpdps is not None:
for smplng in ['oversampled', 'resampled']:
if smplng in out_excpdps:
for dpool in ['errinfo']:
if dpool in out_excpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
if ax != 2:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
out_excpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_excpdps[smplng][dpool]:
arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
arr_units = out_excpdps[smplng][dpool][stat].si.unit
out_excpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
return (out_xcpdps, out_excpdps)
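# Illustrative usage sketch (not part of the original module; xcpdps1/xcpdps2
# and excpdps1/excpdps2 denote hypothetical outputs of compute_power_spectrum()
# and compute_power_spectrum_uncertainty(); the diagonal selection below keeps
# only the zero-offset diagonal of the LST (axis 1) and triad (axis 3) axes):
#
# diagoffsets = [{1: NP.asarray([0]), 3: NP.asarray([0])}]
# avg_x, avg_ex = incoherent_cross_power_spectrum_average([xcpdps1, xcpdps2],
#                                                         excpdps=[excpdps1, excpdps2],
#                                                         diagoffsets=diagoffsets)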
################################################################################
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'):
"""
----------------------------------------------------------------------------
Averages the power spectrum incoherently by binning in bins of k. Returns
the power spectrum in units of both standard power spectrum and \Delta^2
Inputs:
xcpdps [dictionary] A dictionary that contains the incoherent averaged
power spectrum along LST and/or triads axes. This dictionary is
essentially the one(s) returned as the output of the function
incoherent_cross_power_spectrum_average()
kbins [NoneType, list or numpy array] Bins in k. If set to None
(default), it will be determined automatically based on the
inputs in num_kbins, and kbintype. If num_kbins is None and
kbintype='linear', the negative and positive values of k are
folded into a one-sided power spectrum. In this case, the
bins will approximately have the same resolution as the k-values
in the input power spectrum for all the spectral windows.
num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is
set to None. If kbintype is set to 'linear', the negative and
positive values of k are folded into a one-sided power spectrum.
In this case, the bins will approximately have the same
resolution as the k-values in the input power spectrum for all
the spectral windows.
kbintype [string] Specifies the type of binning, used only if kbins is
set to None. Accepted values are 'linear' and 'log' for linear
and logarithmic bins respectively.
Outputs:
Dictionary containing the power spectrum information. At the top level, it
contains keys specifying the sampling to be 'oversampled' or 'resampled'.
Under each of these keys is another dictionary containing the following
keys:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
            The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
or one key named 'errinfo' each of which is a dictionary. 'whole'
contains power spectrum info about the input closure phases. 'submodel'
    contains power spectrum info about the model that has been
    subtracted (as closure phase) from the 'whole' data. 'residual'
contains power spectrum info about the closure phases obtained as a
difference between 'whole' and 'submodel'. 'errinfo' contains power
spectrum information about the subsample differences. There is also
another dictionary under key 'kbininfo' that contains information about
k-bins. These dictionaries contain the following keys and values:
'whole'/'submodel'/'residual'/'errinfo'
[dictionary] It contains the following keys and values:
'mean' [dictionary] Delay power spectrum information under the
'mean' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'median'
[dictionary] Delay power spectrum information under the
'median' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'kbininfo'
[dictionary] Contains the k-bin information. It contains the
following key-value pairs:
'counts'
            [list] List of numpy arrays where each numpy array stores
the counts in the determined k-bins. Each numpy array in the
list corresponds to a spectral window (redshift subband). The
shape of each numpy array is (nkbins,)
'kbin_edges'
[list] List of numpy arrays where each numpy array contains the
k-bin edges. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nkbins+1,).
'kbinnum'
[list] List of numpy arrays containing the bin number under
which the k value falls. Each array in the list corresponds to
a spectral window (redshift subband). The shape of each array
is (nlags,).
'ri'
[list] List of numpy arrays containing the reverse indices for
each k-bin. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nlags+nkbins+1,).
'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info
estimated for the different datapools under different stats
and PS definitions. It has the keys 'mean' and 'median' for the
mean and median statistic respectively. Each of them contain a
dictionary with the following key-value pairs:
'PS' [list] List of numpy arrays where each numpy array
contains a standard power spectrum typically in units of
'K2 Mpc3'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
'Del2' [list] List of numpy arrays where each numpy array
contains a Delta^2 power spectrum typically in units of
'K2'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
if kbins is not None:
if not isinstance(kbins, (list,NP.ndarray)):
raise TypeError('Input kbins must be a list or numpy array')
else:
if not isinstance(kbintype, str):
raise TypeError('Input kbintype must be a string')
if kbintype.lower() not in ['linear', 'log']:
raise ValueError('Input kbintype must be set to "linear" or "log"')
if kbintype.lower() == 'log':
if num_kbins is None:
num_kbins = 10
psinfo = {}
keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in keys:
psinfo[key] = xcpdps[key]
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
psinfo[smplng] = {}
for key in sampling_keys:
psinfo[smplng][key] = xcpdps[smplng][key]
kprll = xcpdps[smplng]['kprll']
lags = xcpdps[smplng]['lags']
eps = 1e-10
if kbins is None:
dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
if kbintype.lower() == 'linear':
                    bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]//2+1, endpoint=True)
else:
bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
bins_kprll = NP.insert(bins_kprll, 0, -eps)
else:
bins_kprll = NP.asarray(kbins)
num_kbins = bins_kprll.size - 1
psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []}
for spw in range(kprll.shape[0]):
counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
                counts = counts.astype(int)
psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
psinfo[smplng][dpool] = {}
psinfo[smplng]['kbininfo'][dpool] = {}
keys = ['diagoffsets', 'diagweights', 'axesmap']
for key in keys:
psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
psinfo[smplng]['kbininfo'][dpool][stat] = []
for combi in range(len(xcpdps[smplng][dpool][stat])):
outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
outshape[-1] = num_kbins
                                tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex128) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit)
                                tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex128) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
                                tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float64) / U.Mpc
for spw in range(kprll.shape[0]):
counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
for binnum in range(num_kbins):
if counts[binnum] > 0:
ind_kbin = ri[ri[binnum]:ri[binnum+1]]
tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
                                            k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=int)
k_shape[-1] = -1
tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
progress.update(binnum+1)
progress.finish()
psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]
return psinfo
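# Usage sketch (hypothetical inputs): bin an incoherently averaged power
# spectrum into 15 logarithmic k-bins. Here 'xcpdps_avg' stands for the
# dictionary returned by incoherent_cross_power_spectrum_average() above.
#
#     psinfo = incoherent_kbin_averaging(xcpdps_avg, kbins=None,
#                                        num_kbins=15, kbintype='log')
#     ps_mean_list = psinfo['resampled']['whole']['mean']['PS']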
################################################################################
class ClosurePhase(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
extfile [string] Full path to external file containing information
of ClosurePhase instance. The file is in HDF5 format
cpinfo [dictionary] Contains the following top level keys,
namely, 'raw', 'processed', and 'errinfo'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan).
Under the 'processed' key are more subkeys, namely,
'native', 'prelim', and optionally 'submodel' and 'residual'
each holding a dictionary.
Under 'native' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked
array: (nlst,ndays,ntriads,nchan)).
Under 'prelim' dictionary, the subsubkeys for further
dictionaries are 'tbins' (numpy array of tbin centers
after smoothing), 'dtbins' (numpy array of tbin
intervals), 'wts' (masked array:
(ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'.
The dictionaries under 'eicp' are indexed by keys
'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
'median' (masked array: (ntbins,ndays,ntriads,nchan)),
'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and
'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The
last one denotes Median Absolute Deviation.
Under 'submodel' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)).
Under 'residual' dictionary, the subsubkeys for further
dictionaries are 'cphase' and 'eicp'. These are
dictionaries too. The dictionaries under 'eicp' are
indexed by keys 'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
and 'median' (masked array:
(ntbins,ndays,ntriads,nchan)).
Under key 'errinfo', it contains the following keys and
values:
'list_of_pair_of_pairs'
List of pair of pairs for which differences of
complex exponentials have been computed, where the
elements are bins of days. The number of elements
in the list is ncomb. And each element is a smaller
(4-element) list of pair of pairs
'eicp_diff'
Difference of complex exponentials between pairs
of day bins. This will be used in evaluating noise
properties in power spectrum. It is a dictionary
with two keys '0' and '1' where each contains the
difference from a pair of subsamples. Each of these
keys contains a numpy array of shape
(nlstbins,ncomb,2,ntriads,nchan)
'wts' Weights in difference of complex exponentials
obtained by sum of squares of weights that are
associated with the pair that was used in the
differencing. It is a dictionary with two keys '0'
                    and '1' where each contains the associated weights.
                    Each is of shape (nlstbins,ncomb,2,ntriads,nchan)
Member functions:
__init__() Initialize an instance of class ClosurePhase
expicp() Compute and return complex exponential of the closure phase
as a masked array
smooth_in_tbins()
Smooth the complex exponentials of closure phases in LST
                bins. Both mean and median smoothed versions are produced.
subtract() Subtract complex exponential of the bispectrum phase
from the current instance and updates the cpinfo attribute
subsample_differencing()
Create subsamples and differences between subsamples to
evaluate noise properties from the data set.
save() Save contents of attribute cpinfo in external HDF5 file
----------------------------------------------------------------------------
"""
def __init__(self, infile, freqs, infmt='npz'):
"""
------------------------------------------------------------------------
Initialize an instance of class ClosurePhase
Inputs:
infile [string] Input file including full path. It could be a NPZ
with raw data, or a HDF5 file that could contain raw or
processed data. The input file format is specified in the
input infmt. If it is a NPZ file, it must contain the
following keys/files:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA
                       units, i.e. MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is
(nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard
deviation of closure phases across days. Shape
is (nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard
deviation of closure phases across triads.
Shape is (nlst,ndays,nchan)
freqs [numpy array] Frequencies (in Hz) in the input. Size is
nchan.
infmt [string] Input file format. Accepted values are 'npz'
(default) and 'hdf5'.
------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
if not isinstance(freqs, NP.ndarray):
raise TypeError('Input freqs must be a numpy array')
freqs = freqs.ravel()
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input infmt must be "npz" or "hdf5"')
if infmt.lower() == 'npz':
infilesplit = infile.split('.npz')
infile_noext = infilesplit[0]
self.cpinfo = loadnpz(infile)
# npz2hdf5(infile, infile_noext+'.hdf5')
self.extfile = infile_noext + '.hdf5'
else:
# if not isinstance(infile, h5py.File):
# raise TypeError('Input infile is not a valid HDF5 file')
self.extfile = infile
self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)
if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
self.f = freqs
self.df = freqs[1] - freqs[0]
force_expicp = False
if 'processed' not in self.cpinfo:
force_expicp = True
else:
if 'native' not in self.cpinfo['processed']:
force_expicp = True
self.expicp(force_action=force_expicp)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['errinfo'] = {}
############################################################################
def expicp(self, force_action=False):
"""
------------------------------------------------------------------------
Compute the complex exponential of the closure phase as a masked array
Inputs:
force_action [boolean] If set to False (default), the complex
exponential is computed only if it has not been done so
already. Otherwise the computation is forced.
------------------------------------------------------------------------
"""
if 'processed' not in self.cpinfo:
self.cpinfo['processed'] = {}
force_action = True
if 'native' not in self.cpinfo['processed']:
self.cpinfo['processed']['native'] = {}
force_action = True
if 'cphase' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
force_action = True
if not force_action:
if 'eicp' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
                self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(float), mask=self.cpinfo['raw']['flags'])
else:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
            self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(float), mask=self.cpinfo['raw']['flags'])
############################################################################
def smooth_in_tbins(self, daybinsize=None, ndaybins=None, lstbinsize=None):
"""
------------------------------------------------------------------------
        Smooth the complex exponentials of closure phases in time bins. Both
        mean and median smoothed versions are produced.
Inputs:
        daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to non-None value.
        ndaybins [NoneType or integer] Number of bins along day axis. Used only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to non-None value.
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins > 1:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
daybinintervals = NP.asarray(daybinsize).reshape(-1)
daybincenters = daybins[0] + 0.5 * daybinintervals
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
                    counts = counts.astype(int)
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['eicp'] = {}
# self.cpinfo['processed']['prelim']['cphase'] = {}
# self.cpinfo['processed']['prelim']['daybins'] = daybincenters
# self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
                    for binnum in range(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
# mask = wts_daybins <= 0.0
# self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins <= 0:
raise ValueError('Input ndaybins must be positive')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['eicp'] = {}
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['daybins'] = daybincenters
self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
mask = wts_daybins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution, data and weights
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1]
                    # Ensure that the LST bins are inside the min/max envelope to
                    # enable error-free interpolation later
self.cpinfo['processed']['prelim']['lstbins'][0] += eps
self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
                counts = counts.astype(int)
if 'wts' not in self.cpinfo['processed']['prelim']:
outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
else:
outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
                eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
import numpy as np
from scipy.stats import norm
from scipy import stats
counter = 0
class AcquisitionFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, acq):
self.acq=acq
acq_name=acq['name']
ListAcq=['bucb','ucb', 'ei', 'poi','random','thompson', 'mu',
'pure_exploration','kov_mes','mes','kov_ei',
'kov_erm','kov_cbm','kov_tgp','kov_tgp_ei']
# check valid acquisition function
IsTrue=[val for idx,val in enumerate(ListAcq) if val in acq_name]
#if not in acq_name:
if IsTrue == []:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(acq_name)
raise NotImplementedError(err)
else:
self.acq_name = acq_name
self.dim=acq['dim']
if 'scalebounds' not in acq:
self.scalebounds=[0,1]*self.dim
else:
self.scalebounds=acq['scalebounds']
# vector theta for thompson sampling
#self.flagTheta_TS=0
self.initialized_flag=0
self.objects=[]
def acq_kind(self, x, gp):
y_max=np.max(gp.Y)
if np.any(np.isnan(x)):
return 0
if self.acq_name == 'bucb':
return self._bucb(x, gp, self.acq['kappa'])
if self.acq_name == 'ucb':
return self._ucb(x, gp)
if self.acq_name=='kov_cbm':
return self._cbm(x,gp,target=self.acq['fstar_scaled'])
if self.acq_name == 'lcb':
return self._lcb(x, gp)
if self.acq_name == 'ei' or self.acq_name=='kov_tgp_ei':
return self._ei(x, gp, y_max)
if self.acq_name == 'kov_ei' :
return self._ei(x, gp, y_max=self.acq['fstar_scaled'])
if self.acq_name == 'kov_erm' or self.acq_name =='kov_tgp' or self.acq_name=='kov_ei_cb':
return self._erm(x, gp, fstar=self.acq['fstar_scaled'])
if self.acq_name == 'pure_exploration':
return self._pure_exploration(x, gp)
if self.acq_name == 'ei_mu':
return self._ei(x, gp, y_max)
if self.acq_name == 'mu':
return self._mu(x, gp)
if self.acq_name == 'mes':
if self.initialized_flag==0:
self.object=AcquisitionFunction.MaxValueEntropySearch(gp,self.scalebounds,
ystars=self.acq['ystars'])
self.initialized_flag=1
return self.object(x)
else:
return self.object(x)
if self.acq_name == 'kov_mes':
if self.initialized_flag==0:
self.object=AcquisitionFunction.MaxValueEntropySearch(gp,self.scalebounds,
ystars=np.asarray([self.acq['fstar_scaled']]))
self.initialized_flag=1
return self.object(x)
else:
return self.object(x)
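    # Usage sketch (hypothetical GP object): 'gp' is assumed to expose
    # predict(x, eval_MSE=True) and a Y attribute as used above.
    #
    #     acq = AcquisitionFunction({'name': 'ei', 'dim': 2,
    #                                'scalebounds': [0, 1, 0, 1]})
    #     value = acq.acq_kind(np.array([[0.5, 0.5]]), gp)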
@staticmethod
def _mu(x, gp):
mean, var = gp.predict(x, eval_MSE=True)
mean=np.atleast_2d(mean).T
return mean
@staticmethod
def _lcb(x, gp):
mean, var = gp.predict(x, eval_MSE=True)
var.flags['WRITEABLE']=True
#var=var.copy()
var[var<1e-10]=0
mean=np.atleast_2d(mean).T
var=np.atleast_2d(var).T
#beta_t = gp.X.shape[1] * np.log(len(gp.Y))
        beta_t = 2 * np.log(len(gp.Y))
return mean - np.sqrt(beta_t) * np.sqrt(var)
@staticmethod
def _ucb(x, gp):
mean, var = gp.predict(x, eval_MSE=True)
var.flags['WRITEABLE']=True
#var=var.copy()
var[var<1e-10]=0
mean=np.atleast_2d(mean).T
var=np.atleast_2d(var).T
# Linear in D, log in t https://github.com/kirthevasank/add-gp-bandits/blob/master/BOLibkky/getUCBUtility.m
#beta_t = gp.X.shape[1] * np.log(len(gp.Y))
beta_t = np.log(len(gp.Y))
#beta=300*0.1*np.log(5*len(gp.Y))# delta=0.2, gamma_t=0.1
return mean + np.sqrt(beta_t) * np.sqrt(var)
@staticmethod
def _cbm(x, gp, target): # confidence bound minimization
mean, var = gp.predict(x, eval_MSE=True)
var.flags['WRITEABLE']=True
var[var<1e-10]=0
mean=np.atleast_2d(mean).T
var=np.atleast_2d(var).T
# Linear in D, log in t https://github.com/kirthevasank/add-gp-bandits/blob/master/BOLibkky/getUCBUtility.m
#beta_t = gp.X.shape[1] * np.log(len(gp.Y))
beta_t = np.log(len(gp.Y))
#beta=300*0.1*np.log(5*len(gp.Y))# delta=0.2, gamma_t=0.1
return -np.abs(mean-target) - np.sqrt(beta_t) * np.sqrt(var)
@staticmethod
def _erm(x, gp, fstar):
#y_max=np.asscalar(y_max)
mean, var = gp.predict(x, eval_MSE=True)
if mean.shape!=var.shape:
print("bug")
mean, var = gp.predict(x, eval_MSE=True)
var2 = np.maximum(var, 1e-10 + 0 * var)
z = (mean - fstar)/np.sqrt(var2)
out=(fstar-mean) * (1-norm.cdf(z)) + np.sqrt(var2) * norm.pdf(z)
out[var2<1e-10]=0
#print(out)
        if np.any(out < 0):
            print("out<0")
return -1*out # for minimization problem
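    # Sanity check for _erm (worked example): with mean=1.0, fstar=0.0 and
    # var=1.0, z=1 and out = (0-1)*(1-norm.cdf(1)) + norm.pdf(1)
    # ~ -0.1587 + 0.2420 ~ 0.0833, so _erm returns ~ -0.0833 (the negated
    # expected regret, so maximizing the acquisition minimizes regret).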
@staticmethod
def _bucb(x, gp, kappa):
mean, var = gp.predict_bucb(x, eval_MSE=True)
var.flags['WRITEABLE']=True
#var=var.copy()
var[var<1e-10]=0
mean=np.atleast_2d(mean).T
var=np.atleast_2d(var).T
return mean + kappa * np.sqrt(var)
@staticmethod
def _ei(x, gp, y_max):
#y_max=np.asscalar(y_max)
mean, var = gp.predict(x, eval_MSE=True)
if gp.nGP==0:
var2 = np.maximum(var, 1e-10 + 0 * var)
z = (mean - y_max)/np.sqrt(var2)
out=(mean - y_max) * norm.cdf(z) + np.sqrt(var2) * norm.pdf(z)
out[var2<1e-10]=0
return out
else: # multiple GP
z=[None]*gp.nGP
out=[None]*gp.nGP
# Avoid points with zero variance
#y_max=y_max*0.8
for idx in range(gp.nGP):
var[idx] = np.maximum(var[idx], 1e-9 + 0 * var[idx])
z[idx] = (mean[idx] - y_max)/np.sqrt(var[idx])
out[idx]=(mean[idx] - y_max) * norm.cdf(z[idx]) + np.sqrt(var[idx]) * norm.pdf(z[idx])
            if len(x)==1000:
                return out
            else:
                return np.mean(out) # get mean over acquisition functions
            # return np.prod(out,axis=0) # product over acquisition functions (unreachable)
@staticmethod
    def _poi(x, gp,y_max): # probability of improvement
mean, var = gp.predict(x, eval_MSE=True)
# Avoid points with zero variance
var = np.maximum(var, 1e-9 + 0 * var)
z = (mean - y_max)/np.sqrt(var)
return norm.cdf(z)
@staticmethod
def _poi_kov(x, gp,y_max): # POI with Known Optimal Value
mean, var = gp.predict(x, eval_MSE=True)
# Avoid points with zero variance
        var = np.maximum(var, 1e-9 + 0 * var)
# Import required libraries
import pandas as pd
import dash
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import os
import sys
from dash import html, dcc
from dash.dependencies import Input, Output
pp=os.path.dirname(os.path.abspath(__file__))
pp = os.path.dirname(pp)
# This appends the path of parent folder to the global path of program
# Try not to use this anymore
sys.path.append(pp)
from utils import generalized_hist_v2
# Read the sales data into pandas dataframe
df_items = pd.read_csv('../data/items.csv')
df_categories = pd.read_csv('../data/item_categories.csv')
df_shops = pd.read_csv('../data/shops.csv')
df_sales = pd.read_csv('../data/sales_train.csv')
df_sales_test = pd.read_csv('../data/test.csv')
# Add revenue info
df_sales['revenue'] = df_sales['item_price'] * df_sales['item_cnt_day']
# For convenience add category information to the sales data
df_sales['item_category_id'] = df_sales['item_id'].map(df_items['item_category_id'])
# Dictionary of functions to give appropriate title
site_to_title = {
'date_block': lambda x: f'Total Number of {x} in each month',
'item': lambda x: f'Total Number of {x} by Item',
'category': lambda x: f'Total Number of {x} by Category',
'shop': lambda x: f'Total Number of {x} by Shopping Store',
'outlier': lambda x: f'Outliers in {"Price" if x=="Sales" else "Item_cnt_day"}',
}
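# Example: site_to_title['shop']('Sales') evaluates to
# 'Total Number of Sales by Shopping Store', used below as a figure title.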
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('Daily Sales Data',
style={'textAlign': 'center', 'color': '#3054D1',
'font-size': 40}),
# Drop down menu to select the type of page
dcc.Dropdown(id='site-dropdown',
options=[
{'label': 'Date Blocks', 'value': 'date_block'},
{'label': 'Items', 'value': 'item'},
{'label': 'Categories', 'value': 'category'},
{'label': 'Shopping Stores', 'value': 'shop'},
{'label': 'Outliers', 'value': 'outlier'},],
value='date_block',
placeholder="Select a Transaction Feature",
searchable=True),
html.Br(),
html.Div(dcc.Graph(id='num-transactions')),
html.Br(),
html.Div(dcc.Graph(id='num-sales')),
html.Br(),
html.Div(dcc.Graph(id='num-revenues')),
html.Br(),
])
#
# Function decorator to specify function input and output
@app.callback(Output(component_id='num-transactions', component_property='figure'),
Input(component_id='site-dropdown', component_property='value'))
def get_graph_transaction_num(entered_site):
    '''
    Returns a graph of the number of transactions
    based on the entered_site
'''
filtered_df = df_sales
title = site_to_title[entered_site]("Transactions")
# Create figure object to put graph
fig = go.Figure(layout_title_text=title)
if entered_site == 'date_block':
filtered_df = filtered_df[['date','date_block_num']].groupby(['date_block_num']).count().reset_index()
# Add figures
fig.add_trace(go.Bar(x=filtered_df["date_block_num"], y=filtered_df['date']))
fig.add_trace(go.Scatter(x=filtered_df["date_block_num"], y=filtered_df["date"], mode='lines+markers'))
elif entered_site == "item":
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 10000
count, division = np.histogram(df_sales['item_id'], bins=bins_num)
width = 20*(division.max() - division.min()) / bins_num
# Add figures
fig.add_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
elif entered_site == "category":
filtered_df = filtered_df[['date','item_category_id']].groupby(['item_category_id']).count().reset_index()
fig.add_trace(go.Bar(x=filtered_df["item_category_id"], y=filtered_df['date']))
elif entered_site == "shop":
filtered_df = filtered_df[['date','shop_id']].groupby(['shop_id']).count().reset_index()
fig.add_trace(go.Bar(x=filtered_df["shop_id"], y=filtered_df['date']))
else:
filtered_df = filtered_df[['item_cnt_day']]
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 100
width = 1200 / bins_num
count, division = np.histogram(filtered_df['item_cnt_day'], bins=bins_num)
# Add figures
fig.add_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
fig.update_yaxes(title_text="y-axis (log scale)", type="log")
# Set the gap between histogram bars
fig.update_layout(bargap=0.2)
return fig
# Function decorator to specify function input and output
@app.callback(Output(component_id='num-sales', component_property='figure'),
Input(component_id='site-dropdown', component_property='value'))
def get_graph_sales_num(entered_site):
    '''
    Returns a graph of the number of sales
    based on the entered_site
'''
filtered_df = df_sales
title = site_to_title[entered_site]("Sales")
fig = go.Figure(layout_title_text=title)
fig.update_layout(bargap=0.2)
if entered_site == 'date_block':
filtered_df = filtered_df[['item_cnt_day','date_block_num']].groupby(['date_block_num']).sum().reset_index()
fig.add_trace(go.Bar(x=filtered_df["date_block_num"], y=filtered_df['item_cnt_day']))
fig.add_trace(go.Scatter(x=filtered_df["date_block_num"], y=filtered_df["item_cnt_day"], mode='lines+markers'))
elif entered_site == "item":
        # Adjust the width of bins based on bins_num for visual convenience
bins_num = 1000
filtered_df = filtered_df[['item_cnt_day','item_id']].groupby(['item_id']).sum().to_dict()['item_cnt_day']
item_sales = df_items['item_id'].map(lambda x: filtered_df.get(x, 0)).reset_index()
item_sales.columns = ['item_id', 'item_cnt']
division, count = generalized_hist_v2(item_sales['item_id'], item_sales['item_cnt'], bins_num)
width = 2*(division.max() - division.min()) / bins_num
fig.add_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
elif entered_site == "category":
filtered_df = filtered_df[['item_cnt_day','item_category_id']].groupby(['item_category_id']).sum().reset_index()
fig.add_trace(go.Bar(x=filtered_df["item_category_id"], y=filtered_df['item_cnt_day']))
elif entered_site == "shop":
filtered_df = filtered_df[['item_cnt_day','shop_id']].groupby(['shop_id']).sum().reset_index()
fig.add_trace(go.Bar(x=filtered_df["shop_id"], y=filtered_df['item_cnt_day']))
else:
filtered_df = filtered_df[['item_price']]
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 100
width = 120000 / bins_num
        count, division = np.histogram(filtered_df['item_price'], bins=bins_num)
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module contains common utility functions for minddataset tests.
"""
import os
import re
import string
import collections
import pytest
import numpy as np
from mindspore.mindrecord import FileWriter
FILES_NUM = 4
CV_DIR_NAME = "../data/mindrecord/testImageNetData"
def get_data(dir_name):
"""
usage: get data from imagenet dataset
params:
dir_name: directory containing folder images and annotation information
"""
if not os.path.isdir(dir_name):
raise IOError("Directory {} does not exist".format(dir_name))
img_dir = os.path.join(dir_name, "images")
ann_file = os.path.join(dir_name, "annotation.txt")
with open(ann_file, "r") as file_reader:
lines = file_reader.readlines()
data_list = []
for i, line in enumerate(lines):
try:
filename, label = line.split(",")
label = label.strip("\n")
with open(os.path.join(img_dir, filename), "rb") as file_reader:
img = file_reader.read()
data_json = {"id": i,
"file_name": filename,
"data": img,
"label": int(label)}
data_list.append(data_json)
except FileNotFoundError:
continue
return data_list
def inputs(vectors, maxlen=50):
length = len(vectors)
if length > maxlen:
return vectors[0:maxlen], [1] * maxlen, [0] * maxlen
input_ = vectors + [0] * (maxlen - length)
mask = [1] * length + [0] * (maxlen - length)
segment = [0] * maxlen
return input_, mask, segment
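# Example: inputs([5, 6, 7], maxlen=5) returns
# ([5, 6, 7, 0, 0], [1, 1, 1, 0, 0], [0, 0, 0, 0, 0]),
# i.e. the padded token ids, the attention mask, and the segment ids.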
def convert_to_uni(text):
if isinstance(text, str):
return text
if isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
raise Exception("The type %s does not convert!" % type(text))
def load_vocab(vocab_file):
"""load vocabulary to translate statement."""
vocab = collections.OrderedDict()
vocab.setdefault('blank', 2)
index = 0
with open(vocab_file) as reader:
while True:
tmp = reader.readline()
if not tmp:
break
token = convert_to_uni(tmp)
token = token.strip()
vocab[token] = index
index += 1
return vocab
def get_nlp_data(dir_name, vocab_file, num):
"""
Return raw data of aclImdb dataset.
Args:
dir_name (str): String of aclImdb dataset's path.
vocab_file (str): String of dictionary's path.
num (int): Number of sample.
Returns:
List
"""
if not os.path.isdir(dir_name):
raise IOError("Directory {} not exists".format(dir_name))
for root, _, files in os.walk(dir_name):
for index, file_name_extension in enumerate(files):
if index < num:
file_path = os.path.join(root, file_name_extension)
file_name, _ = file_name_extension.split('.', 1)
id_, rating = file_name.split('_', 1)
with open(file_path, 'r') as f:
raw_content = f.read()
dictionary = load_vocab(vocab_file)
vectors = [dictionary.get('[CLS]')]
vectors += [dictionary.get(i) if i in dictionary
else dictionary.get('[UNK]')
for i in re.findall(r"[\w']+|[{}]"
.format(string.punctuation),
raw_content)]
vectors += [dictionary.get('[SEP]')]
input_, mask, segment = inputs(vectors)
                input_ids = np.reshape(np.array(input_)
from __future__ import division
import unittest
import pytest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from openmdao.api import IndepVarComp, Group, Problem
from openconcept.utilities.math.integrals import Integrator
class IntegratorTestGroup(Group):
"""An OpenMDAO group to test the every-node integrator component
Options
-------
num_nodes : int
Number of analysis points per segment
quantity_units : str
The units of the integral quantity q (NOT the rate)
diff_units : str
The units of the integrand (none by default)
integrator : str
Which integration scheme to use (default 'simpson')
"""
def initialize(self):
self.options.declare('quantity_units',default=None, desc="Units of the quantity being differentiated")
self.options.declare('diff_units',default=None, desc="Units of the differential")
self.options.declare('rate_units',default=None, desc="Units of the rate")
self.options.declare('num_nodes',default=11, desc="Number of nodes per segment")
self.options.declare('integrator',default='simpson', desc="Which simpson integrator to use")
self.options.declare('time_setup',default='dt')
self.options.declare('second_integrand',default=False)
self.options.declare('zero_start', default=False)
self.options.declare('final_only', default=False)
self.options.declare('test_auto_names', default=False)
self.options.declare('val', default=0.0)
def setup(self):
quantity_units = self.options['quantity_units']
diff_units = self.options['diff_units']
rate_units = self.options['rate_units']
num_nodes = self.options['num_nodes']
integrator_option = self.options['integrator']
time_setup = self.options['time_setup']
second_integrand = self.options['second_integrand']
zero_start = self.options['zero_start']
final_only = self.options['final_only']
test_auto_names = self.options['test_auto_names']
val = self.options['val']
iv = self.add_subsystem('iv', IndepVarComp())
integ = Integrator(diff_units=diff_units, num_nodes=num_nodes, method=integrator_option,
time_setup=time_setup)
if not test_auto_names:
integ.add_integrand('q', rate_name='dqdt', start_name='q_initial', end_name='q_final',
units=quantity_units, rate_units=rate_units, zero_start=zero_start,
final_only=final_only, val=val)
else:
integ.add_integrand('q', units=quantity_units, rate_units=rate_units, zero_start=zero_start,
final_only=final_only)
if second_integrand:
integ.add_integrand('q2', rate_name='dq2dt', start_name='q2_initial', end_name='q2_final', units='kJ')
iv.add_output('rate_to_integrate_2', val=np.ones((num_nodes,)), units='kW')
iv.add_output('initial_value_2', val=0., units='kJ')
self.connect('iv.rate_to_integrate_2', 'integral.dq2dt')
self.connect('iv.initial_value_2', 'integral.q2_initial')
self.add_subsystem('integral', integ)
if rate_units and quantity_units:
# overdetermined and possibly inconsistent
pass
elif not rate_units and not quantity_units:
if diff_units:
rate_units = '(' + diff_units +')** -1'
elif not rate_units:
# solve for rate_units in terms of quantity_units
if not diff_units:
rate_units = quantity_units
else:
rate_units = '('+quantity_units+') / (' + diff_units +')'
elif not quantity_units:
# solve for quantity units in terms of rate units
if not diff_units:
quantity_units = rate_units
else:
quantity_units = '('+rate_units+')*('+diff_units+')'
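        # Example of the unit resolution above: quantity_units='kg' with
        # diff_units='s' gives rate_units='(kg) / (s)', while rate_units='kW'
        # with diff_units='s' gives quantity_units='(kW)*(s)', i.e. energy.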
iv.add_output('rate_to_integrate', val=np.ones((num_nodes,)), units=rate_units)
iv.add_output('initial_value', val=0, units=quantity_units)
if not test_auto_names:
self.connect('iv.rate_to_integrate','integral.dqdt')
else:
self.connect('iv.rate_to_integrate','integral.q_rate')
if not zero_start:
self.connect('iv.initial_value', 'integral.q_initial')
if time_setup == 'dt':
iv.add_output('dt', val=1, units=diff_units)
self.connect('iv.dt', 'integral.dt')
elif time_setup == 'duration':
iv.add_output('duration', val=1*(num_nodes-1), units=diff_units)
self.connect('iv.duration', 'integral.duration')
elif time_setup == 'bounds':
iv.add_output('t_initial', val=2, units=diff_units)
iv.add_output('t_final', val=2 + 1*(num_nodes-1), units=diff_units)
self.connect('iv.t_initial','integral.t_initial')
self.connect('iv.t_final','integral.t_final')
class IntegratorCommonTestCases(object):
"""
A common set of test cases for the integrator component
"""
def test_uniform_no_units(self):
prob = Problem(IntegratorTestGroup(num_nodes=self.num_nodes, integrator=self.integrator))
prob.setup(check=True, force_alloc_complex=True)
prob.run_model()
num_nodes = self.num_nodes
nn_tot = num_nodes
assert_near_equal(prob['integral.q'], np.linspace(0, nn_tot-1, nn_tot), tolerance=1e-14)
assert_near_equal(prob.get_val('integral.q_final', units=None), nn_tot-1, tolerance=1e-14)
partials = prob.check_partials(method='cs',compact_print=True)
assert_check_partials(partials, atol=1e-8, rtol=1e0)
def test_linear_no_units(self):
num_nodes = self.num_nodes
nn_tot = num_nodes
x = np.linspace(0, nn_tot-1, nn_tot)
fprime = x
f = x ** 2 / 2
prob = Problem(IntegratorTestGroup(num_nodes=self.num_nodes, integrator=self.integrator))
prob.setup(check=True, force_alloc_complex=True)
prob['iv.rate_to_integrate'] = fprime
prob.run_model()
assert_near_equal(prob['integral.q'], f, tolerance=1e-14)
assert_near_equal(prob.get_val('integral.q_final', units=None), f[-1], tolerance=1e-14)
partials = prob.check_partials(method='cs',compact_print=True)
assert_check_partials(partials, atol=1e-8, rtol=1e0)
def test_quadratic_no_units(self):
num_nodes = self.num_nodes
nn_tot = num_nodes
x = np.linspace(0, nn_tot-1, nn_tot)
fprime = 4 * x **2 - 8*x + 5
f = 4 * x ** 3 / 3 - 8 * x ** 2 / 2 + 5*x
prob = Problem(IntegratorTestGroup(num_nodes=self.num_nodes, integrator=self.integrator))
prob.setup(check=True, force_alloc_complex=True)
prob['iv.rate_to_integrate'] = fprime
prob.run_model()
assert_near_equal(prob['integral.q'], f, tolerance=1e-14)
assert_near_equal(prob.get_val('integral.q_final', units=None), f[-1], tolerance=1e-14)
partials = prob.check_partials(method='cs',compact_print=True)
assert_check_partials(partials, atol=1e-8, rtol=1e0)
def test_machine_zero_rate(self):
num_nodes = self.num_nodes
nn_tot = num_nodes
x = np.linspace(0, nn_tot-1, nn_tot)
fprime = 0.0 * x
f = 0.0 * x
prob = Problem(IntegratorTestGroup(num_nodes=self.num_nodes, integrator=self.integrator))
prob.setup(check=True, force_alloc_complex=True)
prob['iv.rate_to_integrate'] = fprime
prob.run_model()
assert_near_equal(prob['integral.q'], f, tolerance=1e-14)
assert_near_equal(prob.get_val('integral.q_final', units=None), f[-1], tolerance=1e-14)
partials = prob.check_partials(method='cs',compact_print=True)
assert_check_partials(partials, atol=1e-8, rtol=1e0)
def test_auto_names(self):
num_nodes = self.num_nodes
nn_tot = num_nodes
        x = np.linspace(0, nn_tot-1, nn_tot)
from stats_arrays.distributions import UniformUncertainty
from ..base import UncertaintyTestCase
import numpy as np
class UniformTestCase(UncertaintyTestCase):
def unif_params_1d(self):
oneDparams = self.make_params_array(1)
oneDparams['minimum'] = 1
oneDparams['loc'] = 2
oneDparams['maximum'] = 3
return oneDparams
def unif_params_2d(self):
params = self.make_params_array(2)
params['minimum'] = 1
params['loc'] = 2
params['maximum'] = 3
return params
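    # For a uniform distribution on [minimum, maximum], the percent point
    # function (inverse CDF) is ppf(p) = minimum + p*(maximum - minimum);
    # e.g. on [1, 3], ppf([0, 0.5, 1]) = [1, 2, 3], which the tests below
    # check against UniformUncertainty.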
def test_uniform_ppf(self):
oneDparams = self.unif_params_1d()
params = self.unif_params_2d()
        self.assertTrue(np.allclose(np.array([1, 2, 3])
import os
import random
import numpy as np
import scipy.io as sio
from tensorflow.keras.utils import to_categorical
def load_data(dataset_dir, subject_n=32, img_size=(8, 9, 8), number_of_inputs=1,
features_type='multi', num_classes=2, frames_per_subject=4800, seed=7):
img_rows, img_cols, num_chan = img_size
prefixs = ['DE_s', 'PSD_s']
if features_type == 'DE':
prefixs = prefixs[:1]
elif features_type == 'PSD':
prefixs = prefixs[1:]
elif features_type != 'multi':
raise NotImplementedError()
samples_number_per_subject = int(frames_per_subject / number_of_inputs) # tested only for [1...6] range
samples_numbers_list = list(range(samples_number_per_subject))
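    # Example: with the default frames_per_subject=4800 and number_of_inputs=3,
    # each subject yields 1600 samples, each stacking 3 consecutive frames.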
random.seed(seed)
y_a_list = []
y_v_list = []
x_list = []
subject_id_list = []
for i in range(subject_n):
short_name = f'{i + 1:02}'
random.shuffle(samples_numbers_list)
print("\nprocessing: ", short_name, "......")
file_path = os.path.join(dataset_dir, prefixs[0] + short_name)
file = sio.loadmat(file_path)
data = file['data']
y_v = file['valence_labels'][0]
y_a = file['arousal_labels'][0]
y_v = to_categorical(y_v, num_classes)
y_a = to_categorical(y_a, num_classes)
        if len(prefixs) > 1:
for prefix in prefixs[1:]:
file_path = os.path.join(dataset_dir, prefix + short_name)
file = sio.loadmat(file_path)
data = np.concatenate([data, file['data']], axis=1)
one_falx = data.transpose([0, 2, 3, 1])
one_falx = one_falx.reshape((-1, number_of_inputs, img_rows, img_cols, num_chan))
one_y_v = np.empty([0, 2])
        one_y_a = np.empty([0, 2])
#!/usr/bin/python3
'''
Abstract:
    This is a program to plot the relation between two datasets with the same band.
Currently, we focus on twomass and ukidss
Usage:
plot_compared_histograms.py [twomass] [ukidss]
Editor:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180611
####################################
update log
20180611 version alpha 1
1. the code works
'''
import time
import numpy as np
from sys import argv
import matplotlib.pyplot as plt
from IPython.core.pylabtools import figsize
#--------------------------------------------
# main code
if __name__ == "__main__":
VERBOSE = 0
# measure times
start_time = time.time()
#-----------------------------------
# check argv is right
if len(argv) != 3:
print ("Error!\nUsage: plot_compared_histograms.py [two mass] [ukidss]")
print ("Example: plot_compared_histograms.py star_sed_j.txt ukidss_j_star.txt")
exit()
# read argv
name_twomass = argv[1]
name_ukidss = argv[2]
#-----------------------------------
# load data
twomass = np.loadtxt(name_twomass, dtype = np.float64)
ukidss = np.loadtxt(name_ukidss, dtype = np.float64)
# wipe out non-sense data
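    # values above 1E308 (near the float64 maximum) and at or below -999 are
    # sentinel fills for missing photometry; zero them before plotting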
twomass_too_high = np.where(twomass > 1E308)
twomass[twomass_too_high] = 0.0
    twomass_too_low = np.where(twomass <= -999)
    twomass[twomass_too_low] = 0.0
r"""
Module defining Pyclaw geometry objects.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import warnings
import six
from six.moves import range
from six.moves import zip
deprec_message = "'edges' has been deprecated; please use 'nodes' instead."
# ============================================================================
# Default function definitions
# ============================================================================
# Default mapc2p functions
def identity_map_1d(x):
return x,
def identity_map_2d(x,y):
return x,y
def identity_map_3d(x,y,z):
return x,y,z
identity_map={'1': identity_map_1d,
'2': identity_map_2d,
'3': identity_map_3d}
class Grid(object):
r"""
Representation of a single grid.
:Dimension information:
Each dimension has an associated name with it that can be accessed via
that name such as ``grid.x.num_cells`` which would access the x dimension's
number of cells.
:Properties:
If the requested property has multiple values, a list will be returned
with the corresponding property belonging to the dimensions in order.
:Initialization:
Input:
- *dimensions* - (list of :class:`Dimension`) Dimensions that are to
be associated with this grid
Output:
- (:class:`grid`) Initialized grid object
A PyClaw grid is usually constructed from a tuple of PyClaw Dimension objects:
>>> from clawpack.pyclaw.geometry import Dimension, Grid
>>> x = Dimension(0.,1.,10,name='x')
>>> y = Dimension(-1.,1.,25,name='y')
>>> grid = Grid((x,y))
>>> print(grid)
2-dimensional domain (x,y)
No mapping
Extent: [0.0, 1.0] x [-1.0, 1.0]
Cells: 10 x 25
We can query various properties of the grid:
>>> grid.num_dim
2
>>> grid.num_cells
[10, 25]
>>> grid.lower
[0.0, -1.0]
>>> grid.delta # Returns [dx, dy]
[0.1, 0.08]
A grid can be extended to higher dimensions using the add_dimension() method:
>>> z=Dimension(-2.0,2.0,21,name='z')
>>> grid.add_dimension(z)
>>> grid.num_dim
3
>>> grid.num_cells
[10, 25, 21]
Coordinates
===========
We can get the x, y, and z-coordinate arrays of cell nodes and centers from the grid.
Properties beginning with 'c' refer to the computational (unmapped) domain, while
properties beginning with 'p' refer to the physical (mapped) domain. For grids with
no mapping, the two are identical. Also note the difference between 'center' and
'centers'.
>>> import numpy as np
>>> np.set_printoptions(precision=2) # avoid doctest issues with roundoff
>>> grid.c_center([1,2,3])
array([ 0.15, -0.8 , -1.33])
>>> grid.p_nodes[0][0,0,0]
0.0
>>> grid.p_nodes[1][0,0,0]
-1.0
>>> grid.p_nodes[2][0,0,0]
-2.0
It's also possible to get coordinates for ghost cell arrays:
>>> x = Dimension(0.,1.,5,name='x')
>>> grid1d = Grid([x])
>>> grid1d.c_centers
[array([0.1, 0.3, 0.5, 0.7, 0.9])]
>>> grid1d.c_centers_with_ghost(2)
[array([-0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3])]
Mappings
========
A grid mapping can be used to solve in a domain that is not rectangular,
or to adjust the local spacing of grid cells. For instance, we can
use smaller cells on the left and larger cells on the right by doing:
    >>> square = lambda xarr : np.array([x**2 for x in xarr])
    >>> grid1d.mapc2p = square
>>> grid1d.p_centers
array([0.01, 0.09, 0.25, 0.49, 0.81])
    Note that the nodes of the mapped grid are the mapped values
of the computational nodes. In general, they are not the midpoints between
mapped centers:
>>> grid1d.p_nodes
array([0. , 0.04, 0.16, 0.36, 0.64, 1. ])
"""
def __getattr__(self,key):
# Provide dimension attribute lists when requested from Grid object.
# Note that this only gets called when one requests an attribute
# that the grid itself doesn't possess.
if key in ['num_cells','lower','upper','delta','units','centers','nodes',
'on_lower_boundary','on_upper_boundary']:
return self.get_dim_attribute(key)
else:
raise AttributeError("'Grid' object has no attribute '"+key+"'")
# ========== Property Definitions ========================================
@property
def num_dim(self):
r"""(int) - Number of dimensions"""
return len(self._dimensions)
@property
def dimensions(self):
r"""(list) - List of :class:`Dimension` objects defining the
grid's extent and resolution"""
return [getattr(self,name) for name in self._dimensions]
@property
def c_centers(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the computational locations of cell centers, see
:meth:`_compute_c_centers` for more info."""
self._compute_c_centers()
return self._c_centers
@property
def c_nodes(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the computational locations of cell nodes, see
:meth:`_compute_c_nodes` for more info."""
self._compute_c_nodes()
return self._c_nodes
@property
def p_centers(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the physical locations of cell centers, see
:meth:`_compute_p_centers` for more info."""
self._compute_p_centers()
return self._p_centers
@property
def p_nodes(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the physical locations of cell nodes, see
:meth:`_compute_p_nodes` for more info."""
self._compute_p_nodes()
return self._p_nodes
@property
def mapc2p(self):
return self._mapc2p
@mapc2p.setter
def mapc2p(self,mapc2p):
self._mapc2p = mapc2p
self._clear_cached_values()
# ========== Class Methods ===============================================
def __init__(self,dimensions):
r"""
Instantiate a Grid object
See :class:`Grid` for more info.
"""
# ========== Attribute Definitions ===================================
r"""(func) - Coordinate mapping function"""
self.gauges = []
r"""(list) - List of gauges' indices to be filled by add_gauges
method.
"""
self.gauge_file_names = []
r"""(list) - List of file names to write gauge values to"""
self.gauge_files = []
r"""(list) - List of file objects to write gauge values to"""
self.gauge_dir_name = '_gauges'
r"""(string) - Name of the output directory for gauges. If the
`Controller` class is used to run the application, this directory by
default will be created under the `Controller` `outdir` directory.
"""
self._p_centers = None
self._p_nodes = None
self._c_centers = None
self._c_nodes = None
# Dimension parsing
if isinstance(dimensions,Dimension):
dimensions = [dimensions]
self._dimensions = []
for dim in dimensions:
self.add_dimension(dim)
super(Grid,self).__init__()
def _clear_cached_values(self):
self._p_centers = None
self._p_nodes = None
self._c_centers = None
self._c_nodes = None
# ========== Dimension Manipulation ======================================
def add_dimension(self,dimension):
r"""
Add the specified dimension to this patch
:Input:
- *dimension* - (:class:`Dimension`) Dimension to be added
"""
# Add dimension to name list and as an attribute
if dimension.name in self._dimensions:
raise Exception('Unable to add dimension. A dimension'+
' of the same name: {name}, already exists.'
.format(name=dimension.name))
self._dimensions.append(dimension.name)
setattr(self,dimension.name,dimension)
self._clear_cached_values()
# Reset mapping as it presumably makes no sense now
self.mapc2p = identity_map[str(self.num_dim)]
def get_dim_attribute(self,attr):
r"""
        Returns a list of all dimensions' attribute attr
"""
return [getattr(dim,attr) for dim in self.dimensions]
def __copy__(self):
return self.__class__(self)
def __str__(self):
output = "%s-dimensional domain " % str(self.num_dim)
output += "("+",".join([dim.name for dim in self.dimensions])+")\n"
if self.mapc2p in list(identity_map.values()):
output += "No mapping\n"
output += "Extent: "
else:
output += "Mapping function: "+self.mapc2p.__name__+"\n"
output += "Computational domain: "
output += " x ".join(["[{:.2}, {:.2}]".format(dim.lower, dim.upper)
for dim in self.dimensions])
output += "\n"
output += "Cells: "
output += " x ".join(["{}".format(dim.num_cells) for dim in self.dimensions])
return output
# ========== Coordinates =============================================
def _compute_c_centers(self, recompute=False):
r"""Calculate the coordinates of the centers in the computational domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._c_centers is None) or \
any([c is None for c in self.get_dim_attribute('_centers')]):
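            # np.indices returns index arrays of shape (num_dim, *num_cells);
            # indexing each dimension's 1D center coordinates with them
            # broadcasts those coordinates across the full grid.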
index = np.indices(self.num_cells)
self._c_centers = []
for i,center_array in enumerate(self.get_dim_attribute('centers')):
self._c_centers.append(center_array[index[i,...]])
def _compute_c_nodes(self, recompute=False):
r"""Calculate the coordinates of the nodes in the computational domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._c_nodes is None) or \
any([c is None for c in self.get_dim_attribute('_nodes')]):
index = np.indices(n+1 for n in self.num_cells)
self._c_nodes = []
for i,edge_array in enumerate(self.get_dim_attribute('nodes')):
self._c_nodes.append(edge_array[index[i,...]])
def _compute_p_centers(self, recompute=False):
r"""Calculate the coordinates of the centers in the physical domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._p_centers is None) or \
any([c is None for c in self.get_dim_attribute('_centers')]):
self._compute_c_centers(recompute=recompute)
self._p_centers = self.mapc2p(*self._c_centers)
def _compute_p_nodes(self, recompute=False):
r"""Calculate the coordinates of the nodes (corners) in the physical domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._p_nodes is None) or \
any([c is None for c in self.get_dim_attribute('_nodes')]):
self._compute_c_nodes(recompute=recompute)
self._p_nodes = self.mapc2p(*self._c_nodes)
def c_center(self,ind):
r"""Compute center of computational cell with index ind."""
index = [np.array(i) for i in ind]
return np.array([self.c_centers[i][index] for i in range(self.num_dim)])
def p_center(self,ind):
r"""Compute center of physical cell with index ind."""
return self.mapc2p(*self.c_center(ind))
def c_centers_with_ghost(self, num_ghost):
r"""
Calculate the coordinates of the cell centers, including
ghost cells, in the computational domain.
:Input:
- *num_ghost* - (int) Number of ghost cell layers
"""
index = np.indices(n+2*num_ghost for n in self.num_cells)
centers = []
for i,dim in enumerate(self.dimensions):
center_array = dim.centers_with_ghost(num_ghost)
centers.append(center_array[index[i,...]])
return centers
def c_nodes_with_ghost(self, num_ghost):
r"""
Calculate the coordinates of the cell nodes (corners), including
ghost cells, in the computational domain.
:Input:
- *num_ghost* - (int) Number of ghost cell layers
"""
index = np.indices(n+2*num_ghost+1 for n in self.num_cells)
nodes = []
for i,dim in enumerate(self.dimensions):
edge_array = dim.nodes_with_ghost(num_ghost)
nodes.append(edge_array[index[i,...]])
return nodes
def p_centers_with_ghost(self,num_ghost):
return self.mapc2p(*self.c_centers_with_ghost(num_ghost))
def p_nodes_with_ghost(self,num_ghost):
return self.mapc2p(*self.c_nodes_with_ghost(num_ghost))
# ========================================================================
# Edges: deprecated; will be removed in 6.0
@property
def c_edges(self):
warnings.warn(deprec_message)
return self.c_nodes
@property
def p_edges(self):
warnings.warn(deprec_message)
return self.p_nodes
def p_edges_with_ghost(self,num_ghost):
warnings.warn(deprec_message)
return self.p_nodes_with_ghost(num_ghost)
def c_edges_with_ghost(self, num_ghost):
warnings.warn(deprec_message)
return self.c_nodes_with_ghost(num_ghost)
# ========================================================================
# ========================================================================
# Gauges
# ========================================================================
def add_gauges(self,gauge_coords):
r"""
Determine the cell indices of each gauge and make a list of all gauges
with their cell indices.
"""
for gauge in gauge_coords:
# Check if gauge belongs to this grid:
if all(self.lower[n]<=gauge[n]<self.upper[n] for n in range(self.num_dim)):
# Set indices relative to this grid
gauge_index = [int(round((gauge[n]-self.lower[n])/self.delta[n]))
for n in range(self.num_dim)]
gauge_file_name = 'gauge'+'_'.join(str(coord) for coord in gauge)+'.txt'
self.gauge_file_names.append(gauge_file_name)
self.gauges.append(gauge_index)
def setup_gauge_files(self,outdir):
r"""
Creates and opens file objects for gauges.
"""
import os
gauge_path = os.path.join(outdir,self.gauge_dir_name)
if not os.path.exists(gauge_path):
try:
os.makedirs(gauge_path)
except OSError:
print("gauge directory already exists, ignoring")
for gauge in self.gauge_file_names:
gauge_file = os.path.join(gauge_path,gauge)
if os.path.isfile(gauge_file):
os.remove(gauge_file)
self.gauge_files.append(open(gauge_file,'a'))
def plot(self,num_ghost=0,mapped=True,mark_nodes=False,mark_centers=False):
r"""Make a plot of the grid.
By default the plot uses the mapping
grid.mapc2p and does not show any ghost cells. This can be modified
via the arguments `mapped` and `num_ghost`.
Returns a handle to the plot axis object.
"""
import matplotlib.pyplot as plt
if self.num_dim == 2:
fig, ax = plt.subplots(1,1)
if num_ghost>0:
if mapped:
xe, ye = self.p_nodes_with_ghost(num_ghost)
else:
xe, ye = self.c_nodes_with_ghost(num_ghost)
p = ax.pcolormesh(xe,ye,0*xe,edgecolors='k',cmap='bwr',alpha=0.2)
p.set_clim(-1,1)
if mapped:
xe, ye = self.p_nodes
xc, yc = self.p_centers
else:
xe, ye = self.c_nodes
xc, yc = self.c_centers
p = ax.pcolormesh(xe,ye,0*xe,edgecolors='k',cmap='bwr')
p.set_clim(-1,1)
if mark_nodes:
ax.plot(xe,ye,'or')
if mark_centers:
ax.plot(xc,yc,'ob')
ax.axis('equal')
ax.set_xlabel(self.dimensions[0].name)
ax.set_ylabel(self.dimensions[1].name)
return ax
else:
raise Exception('Grid plotting implemented for 2D grids only.')
def _check_validity(self):
for dim in self.dimensions:
dim._check_validity()
assert type(self.num_cells) is int, 'Dimension.num_cells must be an integer'
assert type(self.lower) is float, 'Dimension.lower must be a float'
assert type(self.upper) is float, 'Dimension.upper must be a float'
assert self.num_cells>0, 'Dimension.num_cells must be positive'
assert self.upper > self.lower, 'Dimension.upper must be greater than lower'
# ============================================================================
# Dimension Object
# ============================================================================
class Dimension(object):
r"""
Basic class representing a dimension of a Patch object
:Initialization:
Required arguments, in order:
- *lower* - (float) Lower extent of dimension
- *upper* - (float) Upper extent of dimension
- *num_cells* - (int) Number of cells
Optional (keyword) arguments:
- *name* - (string) string Name of dimension
- *units* - (string) Type of units, used for informational purposes only
Output:
- (:class:`Dimension`) - Initialized Dimension object
Example:
>>> from clawpack.pyclaw.geometry import Dimension
>>> x = Dimension(0.,1.,100,name='x')
>>> print(x)
Dimension x: (num_cells,delta,[lower,upper]) = (100,0.01,[0.0,1.0])
>>> x.name
'x'
>>> x.num_cells
100
>>> x.delta
0.01
>>> x.nodes[0]
0.0
>>> x.nodes[1]
0.01
>>> x.nodes[-1]
1.0
>>> x.centers[-1]
0.995
>>> len(x.centers)
100
>>> len(x.nodes)
101
"""
@property
def delta(self):
r"""(float) - Size of an individual, computational cell"""
return (self.upper-self.lower) / float(self.num_cells)
# ========== Edges: deprecated; will be removed in 6.0 =======
@property
def edges(self):
warnings.warn(deprec_message)
return self.nodes
def edges_with_ghost(self,num_ghost):
warnings.warn(deprec_message)
return self.nodes_with_ghost(num_ghost)
# ========================================================================
# ========== Centers and nodes ========================================
@property
def nodes(self):
r"""(ndarrary(:)) - Location of all cell edge coordinates
for this dimension"""
if self._nodes is None:
self._nodes = np.empty(self.num_cells+1)
for i in range(0,self.num_cells+1):
self._nodes[i] = self.lower + i*self.delta
return self._nodes
@property
def centers(self):
r"""(ndarrary(:)) - Location of all cell center coordinates
for this dimension"""
if self._centers is None:
            self._centers = np.empty(self.num_cells)
            # reconstructed loop, mirroring the nodes property above:
            # centers sit half a cell width above each lower node
            for i in range(self.num_cells):
                self._centers[i] = self.lower + (i + 0.5)*self.delta
        return self._centers
"""
Module to run tests on core.procimg functions.
"""
import os
from IPython import embed
import numpy as np
from astropy.convolution import convolve
from pypeit.tests.tstutils import dev_suite_required
from pypeit.core import procimg
from pypeit.images.rawimage import RawImage
from pypeit.spectrographs.util import load_spectrograph
from pypeit.par.pypeitpar import ProcessImagesPar
from pypeit import utils
def test_replace_columns():
y = np.zeros((10,3), dtype=float)
y[:,2] = 2
bad_col = np.array([False, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
procimg.replace_columns(y, bad_col, copy=True, replace_with='linear')), \
'Interpolation and mean should provide the same result.'
bad_col = np.array([False, True, True])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.zeros_like(y)), 'Should set everything to 0.'
bad_col = np.array([True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.full_like(y, 2)), 'Should set everything to 2.'
y = np.zeros((10,4), dtype=float)
y[:,3] = 3
bad_col = np.array([False, True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='linear'),
np.repeat(np.arange(4),10).reshape(4,10).T), \
'Interpolation failed.'
def test_rn2_frame():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
gain = np.array([1.2, 1.5])
rnvar = procimg.rn2_frame(datasec, rn, digitization=False)
assert rnvar.shape == datasec.shape, 'Shape mismatch'
assert np.array_equal(np.unique(rnvar), rn**2), 'Bad RN variance calculation'
rnvar = procimg.rn2_frame(datasec, rn, units='ADU', gain=gain, digitization=False)
assert np.allclose(np.unique(rnvar), (rn/gain)**2), 'Bad RN variance calculation'
def test_sub_overscan():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
oscan = np.zeros((10,10), dtype=int)
oscan[:5,-3:] = 1
oscan[5:,-3:] = 2
raw = np.zeros((10,10), dtype=float)
raw[datasec == 1] = 10.
raw[datasec == 2] = 20.
raw[oscan == 1] = 9.
raw[oscan == 2] = 19.
raw_sub, _ = procimg.subtract_overscan(raw, datasec, oscan, method='median')
assert np.array_equal(raw_sub[datasec > 0], np.ones(np.sum(datasec > 0), dtype=float)), \
'Bad overscan subtraction'
var = np.ones((10,10), dtype=float)
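    # The overscan level is estimated as a median over the 15 overscan pixels
    # per amplifier; the variance of a median scales as (pi/2)*sigma^2/N,
    # giving pi/2/15 for unit input variance.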
raw_sub, var_sub = procimg.subtract_overscan(raw, datasec, oscan, method='median', var=var)
assert np.array_equal(var_sub[datasec > 0],
np.full(np.sum(datasec > 0), np.pi/2/15, dtype=float)), \
'Bad variance calculation'
def test_trim():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
_datasec = procimg.trim_frame(datasec, datasec < 1)
assert _datasec.shape == (10,7), 'Trimming error'
assert np.array_equal(datasec[datasec > 0], _datasec.flat), 'Values changed'
def test_var_model():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
rnvar = procimg.rn2_frame(datasec, rn)
assert np.array_equal(rnvar, procimg.base_variance(rnvar)), \
'Variance model with only rnvar is just rnvar'
counts = np.full(rnvar.shape, 10., dtype=float)
assert np.array_equal(rnvar, procimg.variance_model(rnvar)), \
'Variance model should just return input if no optional parameters are provided.'
base = procimg.base_variance(rnvar, darkcurr=10.)
base_t = procimg.base_variance(rnvar, darkcurr=5., exptime=2.*3600)
assert np.all(procimg.variance_model(rnvar, counts=counts) > rnvar), \
'Shot noise should increase the variance'
assert np.all(procimg.variance_model(base, counts=counts) > base), \
'Shot noise should increase the variance'
assert np.array_equal(
procimg.variance_model(base, counts=counts),
procimg.variance_model(base_t, counts=counts)), \
'Dark current should be equivalent'
assert np.all(procimg.base_variance(rnvar, proc_var=10.) > rnvar), \
'Processing variance should increase the total variance'
assert np.all(procimg.variance_model(rnvar, counts=counts, count_scale=0.5) <
procimg.variance_model(rnvar, counts=counts)), \
'Scaling should have decreased the noise.'
assert np.all(procimg.variance_model(rnvar, counts=counts, noise_floor=0.1) > rnvar), \
'Noise floor should have increased the variance.'
def test_grow_mask():
mask = np.zeros((9,9), dtype=bool)
mask[4,4] = True
grw_msk = procimg.grow_mask(mask, 2.)
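    # Growing a single masked pixel by radius 2 should produce a filled
    # disk (all pixels within Euclidean distance 2) centered on (4,4).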
_grw_msk = np.zeros((9,9), dtype=bool)
_grw_msk[3:-3,3] = True
_grw_msk[2:-2,4] = True
_grw_msk[3:-3,5] = True
_grw_msk[3,3:-3] = True
_grw_msk[4,2:-2] = True
_grw_msk[5,3:-3] = True
    assert np.array_equal(grw_msk, _grw_msk)
# Class to do parallelized clustering
import os
import numpy as np
import networkx as nx
from sklearn.decomposition import PCA
from scipy.spatial import cKDTree
from scipy.stats import chi2
from yass.template import shift_chans, align_get_shifts_with_ref
from yass import mfm
from yass.util import absolute_path_to_asset
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
def warn(*args, **kwargs):
pass
warnings.warn = warn
class Cluster(object):
"""Class for doing clustering."""
def __init__(self, data_in, analysis=False):
"""Sets up the cluster class for each core
Parameters: ...
"""
# load data and check if prev completed
if self.load_data(data_in): return
if analysis: return
# local channel clustering
if self.verbose:
print("START LOCAL")
# neighbour channel clustering
self.initialize(indices_in=np.arange(len(self.spike_times_original)),
local=True)
self.cluster(current_indices=np.arange(len(self.indices_in)),
local=True,
gen=0,
branch=0,
hist=[])
if self.full_run:
if self.verbose:
print('START DISTANT')
# distant channel clustering
indices_train_local = np.copy(self.indices_train)
indices_train_final = []
templates_final = []
for ii, indices_train_k in enumerate(indices_train_local):
#if self.verbose: print("\nchan/unit {}, UNIT {}/{}".format(self.channel, ii, len(spike_train_local)))
self.distant_ii = ii
self.initialize(indices_in=indices_train_k,
local=False)
self.cluster_once(current_indices=np.arange(len(self.indices_in)),
local=False,
gen=self.history_local_final[ii][0]+1,
branch=self.history_local_final[ii][1],
hist=self.history_local_final[ii][1:])
#self.finish_plotting(local_unit_id=ii)
indices_train_final += self.indices_train
templates_final += self.templates
else:
indices_train_final = []
templates_final = []
for indices_train_k in self.indices_train:
template = self.get_templates_on_all_channels(indices_train_k)
templates_final.append(template)
indices_train_final.append(indices_train_k)
if (self.full_run) and (not self.raw_data):
templates_final_2 = []
indices_train_final_2 = []
for indices_train_k in indices_train_final:
template = self.get_templates_on_all_channels(indices_train_k)
templates_final_2.append(template)
indices_train_final_2.append(indices_train_k)
templates_final = templates_final_2
indices_train_final = indices_train_final_2
# save clusters
self.save_result(indices_train_final, templates_final)
def cluster(self, current_indices, local, gen, branch, hist):
''' Recursive clustering function
        channel: current channel being clustered
        wf = wf_PCA: denoised waveforms (# spikes, # time points, # chans)
        sic = spike_indices of spikes on current channel
        gen = generation of cluster; increases with each clustering step
        hist = the current branch's parent history
'''
if self.min(current_indices.shape[0]): return
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', branch: ' +
str(branch)+', # spikes: '+ str(current_indices.shape[0]))
# featurize #1
pca_wf = self.featurize_step(gen, current_indices, current_indices, local)
# knn triage
if self.raw_data:
idx_keep = self.knn_triage_step(gen, pca_wf)
pca_wf = pca_wf[idx_keep]
current_indices = current_indices[idx_keep]
## subsample if too many
#pca_wf_subsample = self.subsample_step(gen, pca_wf_triage)
## run mfm
#vbParam1 = self.run_mfm(gen, pca_wf_subsample)
vbParam2 = self.run_mfm(gen, pca_wf)
## recover spikes using soft-assignments
#idx_recovered, vbParam2 = self.recover_step(gen, vbParam1, pca_wf)
#if self.min(idx_recovered.shape[0]): return
## if recovered spikes < total spikes, do further indexing
#if idx_recovered.shape[0] < pca_wf.shape[0]:
# current_indices = current_indices[idx_recovered]
# pca_wf = pca_wf[idx_recovered]
# connecting clusters
if vbParam2.rhat.shape[1] > 1:
cc_assignment, stability, idx_keep = self.get_cc_and_stability(vbParam2)
current_indices = current_indices[idx_keep]
pca_wf = pca_wf[idx_keep]
else:
cc_assignment = np.zeros(pca_wf.shape[0], 'int32')
stability = [1]
# save generic metadata containing current branch info
self.save_metadata(pca_wf, vbParam2, cc_assignment, current_indices, local,
gen, branch, hist)
# single cluster
if len(stability) == 1:
self.single_cluster_step(current_indices, pca_wf, local,
gen, branch, hist)
# multiple clusters
else:
self.multi_cluster_step(current_indices, pca_wf, local,
cc_assignment, gen, branch, hist)
def cluster_once(self, current_indices, local, gen, branch, hist):
''' Recursive clustering function
        channel: current channel being clustered
        wf = wf_PCA: denoised waveforms (# spikes, # time points, # chans)
        sic = spike_indices of spikes on current channel
        gen = generation of cluster; increases with each clustering step
        hist = the current branch's parent history
'''
if self.min(current_indices.shape[0]): return
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', branch: ' +
str(branch)+', # spikes: '+ str(current_indices.shape[0]))
# featurize #1
pca_wf = self.featurize_step(gen, current_indices, current_indices, local)
# knn triage
if self.raw_data:
idx_keep = self.knn_triage_step(gen, pca_wf)
pca_wf = pca_wf[idx_keep]
current_indices = current_indices[idx_keep]
## subsample if too many
#pca_wf_subsample = self.subsample_step(gen, pca_wf_triage)
## run mfm
#vbParam1 = self.run_mfm(gen, pca_wf_subsample)
vbParam2 = self.run_mfm(gen, pca_wf)
# assignment
assignment = vbParam2.rhat.argmax(1)
# save generic metadata containing current branch info
self.save_metadata(pca_wf, vbParam2, assignment, current_indices, local,
gen, branch, hist)
unique_assignment = np.unique(assignment)
for k in unique_assignment:
idx_ = np.where(assignment == k)[0]
if not self.min(len(idx_)):
self.single_cluster_step(current_indices[idx_],
pca_wf[idx_], local,
gen, branch, hist)
def save_metadata(self, pca_wf_all, vbParam, cc_label, current_indices, local,
gen, branch, hist):
self.pca_post_triage_post_recovery.append(pca_wf_all)
self.gen_label.append(cc_label)
self.gen_local.append(local)
#self.vbPar_muhat.append(vbParam2.muhat)
self.vbPar_rhat.append(vbParam.rhat)
# save history for every clustered distributions
size_ = 2
size_ += len(hist)
temp = np.zeros(size_, 'int32')
temp[0]=gen
temp[1:-1]=hist
temp[-1]=branch
self.hist.append(temp)
# save history again if local clustering converges in order to do
# distant clustering tracking
self.hist_local = temp
if gen==0 and local:
#self.pca_wf_allchans = self.pca_wf_allchans#[current_indices]
self.indices_gen0 = current_indices
def min(self, n_spikes):
''' Function that checks if spikes left are lower than min_spikes
'''
if n_spikes < self.min_spikes:
return True
return False
def load_data(self, data_in):
''' *******************************************
************ LOADED PARAMETERS ************
*******************************************
'''
# load all input
self.raw_data = data_in[0]
self.full_run = data_in[1]
self.CONFIG = data_in[2]
self.reader_raw = data_in[3]
self.reader_resid = data_in[4]
self.filename_postclustering = data_in[5]
if os.path.exists(self.filename_postclustering):
return True
else:
input_data = np.load(data_in[6])
self.spike_times_original = input_data['spike_times']
self.min_spikes = input_data['min_spikes']
# if there is no spike to cluster, finish
if len(self.spike_times_original) < self.min_spikes:
return True
self.wf_global = input_data['wf']
self.denoised_wf = input_data['denoised_wf']
self.shifts = input_data['shifts']
self.channel = input_data['channel']
if not self.raw_data:
self.upsampled_templates = input_data['up_templates']
self.upsampled_ids = input_data['upsampled_ids']
''' ******************************************
*********** FIXED PARAMETERS *************
******************************************
'''
# These are not user/run specific, should be stayed fixed
self.verbose = False
self.selected_PCA_rank = 5
# threshold at which to set soft assignments to 0
self.assignment_delete_threshold = 0.001
# spike size
self.spike_size = self.CONFIG.spike_size
self.neighbors = self.CONFIG.neigh_channels
self.triage_value = self.CONFIG.cluster.knn_triage
# random subsample, remove edge spikes
#self.clean_input_data()
# if there is no spike to cluster, finish
if len(self.spike_times_original) == 0:
return True
''' ******************************************
*********** SAVING PARAMETERS ************
******************************************
'''
# flag to load all chans waveforms and featurizat for ari's work
self.ari_flag = False
self.wf_global_allchans = None
self.pca_wf_allchans = None
self.indices_gen0 = None
self.data_to_fit = None
self.pca_wf_gen0 = None
# list that holds all the final clustered indices for the premerge clusters
self.clustered_indices_local = []
self.clustered_indices_distant = []
# keep track of local idx source for distant clustering in order to
# index into original distribution indexes
self.distant_ii = None
# initialize metadata saves; easier to do here than using local flags + conditional
self.pca_post_triage_post_recovery=[]
self.vbPar_rhat=[]
#self.vbPar_muhat=[]
self.hist=[]
self.gen_label = []
self.gen_local = []
# this list track the first clustering indexes
self.history_local_final=[]
# return flag that clustering not yet complete
return False
def clean_input_data(self):
# limit clustering to at most 50,000 spikes
max_spikes = self.CONFIG.cluster.max_n_spikes
if len(self.spike_times_original)>max_spikes:
idx_sampled = np.random.choice(
a=np.arange(len(self.spike_times_original)),
size=max_spikes,
replace=False)
self.spike_times_original = self.spike_times_original[idx_sampled]
else:
idx_sampled = np.arange(len(self.spike_times_original))
# limit indexes away from edge of recording
idx_inbounds = np.where(np.logical_and(
self.spike_times_original>=self.spike_size//2,
self.spike_times_original<(self.reader_raw.rec_len-self.spike_size)))[0]
self.spike_times_original = self.spike_times_original[
idx_inbounds].astype('int32')
# clean upsampled ids if available
if not self.raw_data:
self.upsampled_ids = self.upsampled_ids[
idx_sampled][idx_inbounds].astype('int32')
def initialize(self, indices_in, local):
# reset spike_train and templates for both local and distant clustering
self.indices_train = []
self.templates = []
self.indices_in = indices_in
self.neighbor_chans = np.where(self.neighbors[self.channel])[0]
if local:
# initialize
#self.shifts = np.zeros(len(self.spike_times_original))
#self.find_main_channel()
self.loaded_channels = self.neighbor_chans
else:
# load waveforms
if len(self.indices_in) > 0:
self.load_waveforms(local)
# align waveforms
self.align_step(local)
# denoise waveforms on active channels
self.denoise_step(local)
def find_main_channel(self):
if len(self.spike_times_original) > 500:
idx_sampled = np.random.choice(
a=np.arange(len(self.spike_times_original)),
size=500,
replace=False)
else:
idx_sampled = np.arange(len(self.spike_times_original))
sample_spike_times = self.spike_times_original[idx_sampled]
if self.raw_data:
wf, _ = self.reader_raw.read_waveforms(
sample_spike_times, self.spike_size)
# or from residual and add templates
else:
units_ids_sampled = self.upsampled_ids[idx_sampled]
wf, _ = self.reader_resid.read_clean_waveforms(
sample_spike_times, units_ids_sampled,
self.upsampled_templates, self.spike_size)
# find max channel
self.channel = np.mean(wf, axis=0).ptp(0).argmax()
def load_waveforms(self, local):
''' Waveforms only loaded once in gen0 before local clustering starts
'''
if self.verbose:
print ("chan "+str(self.channel)+", loading {} waveforms".format(
len(self.indices_in)))
if local:
self.loaded_channels = self.neighbor_chans
else:
self.loaded_channels = np.arange(self.reader_raw.n_channels)
# load waveforms from raw data
spike_times = self.spike_times_original[self.indices_in]
if self.raw_data:
self.wf_global, skipped_idx = self.reader_raw.read_waveforms(
spike_times, self.spike_size, self.loaded_channels)
# or from residual and add templates
else:
unit_ids = self.upsampled_ids[self.indices_in]
self.wf_global, skipped_idx = self.reader_resid.read_clean_waveforms(
spike_times, unit_ids, self.upsampled_templates,
self.spike_size, self.loaded_channels)
# Cat: TODO: we're cliping the waveforms at 1000 SU; need to check this
# clip waveforms; seems necessary for neuropixel probe due to artifacts
self.wf_global = self.wf_global.clip(min=-1000, max=1000)
# delete any spikes that could not be loaded in previous step
if len(skipped_idx)>0:
self.indices_in = np.delete(self.indices_in, skipped_idx)
def align_step(self, local):
if self.verbose:
print ("chan "+str(self.channel)+", aligning")
        # align waveforms by finding best shifts
if local:
mc = np.where(self.loaded_channels==self.channel)[0][0]
best_shifts = align_get_shifts_with_ref(
self.wf_global[:, :, mc])
self.shifts[self.indices_in] = best_shifts
else:
best_shifts = self.shifts[self.indices_in]
self.wf_global = shift_chans(self.wf_global, best_shifts)
if self.ari_flag:
pass
#self.wf_global_allchans = shift_chans(self.wf_global_allchans,
# best_shifts)
def denoise_step(self, local):
if local:
self.denoise_step_local()
else:
self.denoise_step_distant4()
if self.verbose:
print ("chan "+str(self.channel)+", waveorms denoised to {} dimensions".format(self.denoised_wf.shape[1]))
def denoise_step_local(self):
# align, note: aligning all channels to max chan which is appended to the end
# note: max chan is first from feat_chans above, ensure order is preserved
# note: don't want for wf array to be used beyond this function
# Alignment: upsample max chan only; linear shift other chans
n_data, _, n_chans = self.wf_global.shape
self.denoised_wf = np.zeros((n_data, self.pca_main_components_.shape[0], n_chans),
dtype='float32')
for ii in range(n_chans):
if self.loaded_channels[ii] == self.channel:
self.denoised_wf[:, :, ii] = np.matmul(
self.wf_global[:, :, ii],
self.pca_main_components_.T)/self.pca_main_noise_std[np.newaxis]
else:
self.denoised_wf[:, :, ii] = np.matmul(
self.wf_global[:, :, ii],
self.pca_sec_components_.T)/self.pca_sec_noise_std[np.newaxis]
self.denoised_wf = np.reshape(self.denoised_wf, [n_data, -1])
#energy = np.median(np.square(self.denoised_wf), axis=0)
#good_features = np.where(energy > 0.5)[0]
#if len(good_features) < self.selected_PCA_rank:
# good_features = np.argsort(energy)[-self.selected_PCA_rank:]
#self.denoised_wf = self.denoised_wf[:, good_features]
def denoise_step_distant(self):
# active locations with negative energy
energy = np.median(np.square(self.wf_global), axis=0)
good_t, good_c = np.where(energy > 0.5)
# limit to max_timepoints per channel
max_timepoints = 3
unique_channels = np.unique(good_c)
idx_keep = np.zeros(len(good_t), 'bool')
for channel in unique_channels:
idx_temp = np.where(good_c == channel)[0]
if len(idx_temp) > max_timepoints:
idx_temp = idx_temp[
np.argsort(
energy[good_t[idx_temp], good_c[idx_temp]]
)[-max_timepoints:]]
idx_keep[idx_temp] = True
good_t = good_t[idx_keep]
good_c = good_c[idx_keep]
if len(good_t) == 0:
good_t, good_c = np.where(energy == np.max(energy))
self.denoised_wf = self.wf_global[:, good_t, good_c]
def denoise_step_distant2(self):
# active locations with negative energy
#energy = np.median(np.square(self.wf_global), axis=0)
#template = np.median(self.wf_global, axis=0)
#good_t, good_c = np.where(np.logical_and(energy > 0.5, template < - 0.5))
template = np.median(self.wf_global, axis=0)
good_t, good_c = np.where(template < -0.5)
if len(good_t) > self.selected_PCA_rank:
t_diff = 1
# lowest among all
#main_c_loc = np.where(good_c==self.channel)[0]
#max_chan_energy = energy[good_t[main_c_loc]][:,self.channel]
#index = main_c_loc[np.argmax(max_chan_energy)]
index = template[good_t, good_c].argmin()
keep = connecting_points(np.vstack((good_t, good_c)).T, index, self.neighbors, t_diff)
good_t = good_t[keep]
good_c = good_c[keep]
# limit to max_timepoints per channel
max_timepoints = 3
unique_channels = np.unique(good_c)
idx_keep = np.zeros(len(good_t), 'bool')
for channel in unique_channels:
idx_temp = np.where(good_c == channel)[0]
if len(idx_temp) > max_timepoints:
idx_temp = idx_temp[np.argsort(
template[good_t[idx_temp], good_c[idx_temp]])[:max_timepoints]]
idx_keep[idx_temp] = True
good_t = good_t[idx_keep]
good_c = good_c[idx_keep]
self.denoised_wf = self.wf_global[:, good_t, good_c]
else:
idx = np.argsort(template.reshape(-1))[-self.selected_PCA_rank:]
self.denoised_wf = self.wf_global.reshape(self.wf_global.shape[0], -1)[:, idx]
def denoise_step_distant3(self):
energy = np.median(self.wf_global, axis=0)
max_energy = np.min(energy, axis=0)
main_channel_loc = np.where(self.loaded_channels == self.channel)[0][0]
# max_energy_loc is n x 2 matrix, where each row has time point and channel info
th = np.max((-0.5, max_energy[main_channel_loc]))
max_energy_loc_c = np.where(max_energy <= th)[0]
max_energy_loc_t = energy.argmin(axis=0)[max_energy_loc_c]
max_energy_loc = np.hstack((max_energy_loc_t[:, np.newaxis],
max_energy_loc_c[:, np.newaxis]))
t_diff = 3
index = np.where(max_energy_loc[:, 1]== main_channel_loc)[0][0]
keep = connecting_points(max_energy_loc, index, self.neighbors, t_diff)
if np.sum(keep) >= self.selected_PCA_rank:
max_energy_loc = max_energy_loc[keep]
else:
idx_sorted = np.argsort(
energy[max_energy_loc[:,0], max_energy_loc[:,1]])[-self.selected_PCA_rank:]
max_energy_loc = max_energy_loc[idx_sorted]
# exclude main and secondary channels
#if np.sum(~np.in1d(max_energy_loc[:,1], self.neighbor_chans)) > 0:
# max_energy_loc = max_energy_loc[~np.in1d(max_energy_loc[:,1], self.neighbor_chans)]
#else:
# max_energy_loc = max_energy_loc[max_energy_loc[:,1]==main_channel_loc]
# denoised wf in distant channel clustering is
# the most active time point in each active channels
self.denoised_wf = self.wf_global[:, max_energy_loc[:,0], max_energy_loc[:,1]]
#self.denoised_wf = np.zeros((self.wf_global.shape[0], len(max_energy_loc)), dtype='float32')
#for ii in range(len(max_energy_loc)):
# self.denoised_wf[:, ii] = self.wf_global[:, max_energy_loc[ii,0], max_energy_loc[ii,1]]
def denoise_step_distant4(self):
# energy defined as median square
energy = np.median(np.square(self.wf_global), axis=0)
max_energy = np.max(energy, axis=0)
# active channels
active_chan = np.where(max_energy > 0.5)[0]
mc = np.where(self.loaded_channels == self.channel)[0][0]
mc = np.where(active_chan == mc)[0][0]
# only selected ones connected from max channels
keep = connecting_points2(active_chan, mc, self.neighbors)
selected_chans = active_chan[keep]
# if number of selected channels is less than PCA rank,
# add more
if len(selected_chans) < self.selected_PCA_rank:
unselected_chans = active_chan[~keep]
n_chan_diff = self.selected_PCA_rank - len(selected_chans)
additional_chans = unselected_chans[np.argsort(
max_energy[unselected_chans])[::-1][:n_chan_diff]]
selected_chans = np.hstack((selected_chans, additional_chans))
median_template = np.median(self.wf_global[:,:,selected_chans], 0)
min_points = median_template.argmin(0)
max_points = median_template.argmax(0)
self.denoised_wf = (self.wf_global[:,max_points,selected_chans]
- self.wf_global[:,min_points,selected_chans])
def featurize_step(self, gen, indices_to_feat, indices_to_transform, local):
''' Indices hold the index of the current spike times relative all spikes
'''
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', featurizing')
# find high variance area.
# Including low variance dimensions can lead to overfitting
# (splitting based on collisions)
rank = min(len(indices_to_feat), self.denoised_wf.shape[1], self.selected_PCA_rank)
#stds = np.std(self.denoised_wf[indices_to_feat], axis=0)
#good_d = np.where(stds > 1.05)[0]
#if len(good_d) < rank:
# good_d = np.argsort(stds)[::-1][:rank]
pca = PCA(n_components=rank)
#pca.fit(self.denoised_wf[indices_to_feat][:, good_d])
#pca_wf = pca.transform(
# self.denoised_wf[indices_to_transform][:, good_d]).astype('float32')
pca.fit(self.denoised_wf[indices_to_feat])
pca_wf = pca.transform(
self.denoised_wf[indices_to_transform]).astype('float32')
if gen==0 and local:
# save gen0 distributions before triaging
#data_to_fit = self.denoised_wf[:, good_d]
#n_samples, n_features = data_to_fit.shape
#pca = PCA(n_components=min(self.selected_PCA_rank, n_features))
#pca_wf_gen0 = pca.fit_transform(data_to_fit)
#self.pca_wf_gen0 = pca_wf_gen0.copy()
self.pca_wf_gen0 = pca_wf.copy()
if self.ari_flag and gen==0 and local:
# Cat: TODO: do this only once per channel
# Also, do not index into wf_global_allchans; that's done at completion
#if self.wf_global_allchans.shape[1] > self.selected_PCA_rank:
# denoise global data:
wf_global_denoised = self.denoise_step_distant_all_chans()
# flatten data over last 2 dimensions first
n_data, _ = wf_global_denoised.shape
wf_allchans_2D = wf_global_denoised
stds = np.std(wf_allchans_2D, axis=0)
good_d = np.where(stds > 1.05)[0]
if len(good_d) < self.selected_PCA_rank:
good_d = np.argsort(stds)[::-1][:self.selected_PCA_rank]
data_to_fit = wf_allchans_2D[:, good_d]
n_samples, n_features = data_to_fit.shape
pca = PCA(n_components=min(self.selected_PCA_rank, n_features))
# keep original uncompressed data
self.data_to_fit = data_to_fit
# compress data to selectd pca rank
self.pca_wf_allchans = pca.fit_transform(data_to_fit)
return pca_wf
def subsample_step(self, gen, pca_wf):
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', random subsample')
if pca_wf.shape[0]> self.max_mfm_spikes:
#if self.full_run:
if True:
idx_subsampled = coreset(
pca_wf, self.max_mfm_spikes)
else:
idx_subsampled = np.random.choice(np.arange(pca_wf.shape[0]),
size=self.max_mfm_spikes,
replace=False)
pca_wf = pca_wf[idx_subsampled]
return pca_wf
def run_mfm(self, gen, pca_wf):
mask = np.ones((pca_wf.shape[0], 1))
group = np.arange(pca_wf.shape[0])
vbParam = mfm.spikesort(pca_wf[:,:,np.newaxis],
mask,
group,
self.CONFIG)
if self.verbose:
print("chan "+ str(self.channel)+', gen '\
+str(gen)+", "+str(vbParam.rhat.shape[1])+" clusters from ",pca_wf.shape)
return vbParam
def knn_triage_step(self, gen, pca_wf):
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', knn triage')
knn_triage_threshold = 100*(1-self.triage_value)
if pca_wf.shape[0] > 1/self.triage_value:
idx_keep = knn_triage(knn_triage_threshold, pca_wf)
idx_keep = np.where(idx_keep==1)[0]
else:
idx_keep = np.arange(pca_wf.shape[0])
return idx_keep
def knn_triage_dynamic(self, gen, vbParam, pca_wf):
ids = np.where(vbParam.nuhat > self.min_spikes)[0]
if ids.size <= 1:
self.triage_value = 0
return np.arange(pca_wf.shape[0])
muhat = vbParam.muhat[:,ids,0].T
cov = vbParam.invVhat[:,:,ids,0].T / vbParam.nuhat[ids,np.newaxis, np.newaxis]
# Cat: TODO: move to CONFIG/init function
min_spikes = min(self.min_spikes_triage, pca_wf.shape[0]//ids.size) ##needs more systematic testing, working on it
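        # Draw synthetic samples from each cluster's fitted Gaussian and use
        # their kNN distances to calibrate a data-driven triage threshold.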
pca_wf_temp = np.zeros([min_spikes*cov.shape[0], cov.shape[1]])
#assignment_temp = np.zeros(min_spikes*cov.shape[0], dtype = int)
for i in range(cov.shape[0]):
pca_wf_temp[i*min_spikes:(i+1)*min_spikes]= np.random.multivariate_normal(muhat[i], cov[i], min_spikes)
#assignment_temp[i*min_spikes:(i+1)*min_spikes] = i
kdist_temp = knn_dist(pca_wf_temp)
kdist_temp = kdist_temp[:,1:]
median_distances = np.zeros([cov.shape[0]])
for i in range(median_distances.shape[0]):
#median_distances[i] = np.median(np.median(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 0), axis = 0)
median_distances[i] = np.percentile(np.sum(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 1), 90)
## The percentile value also needs to be tested, value of 50 and scale of 1.2 works wells
kdist = np.sum(knn_dist(pca_wf)[:, 1:], axis=1)
min_threshold = np.percentile(kdist, 100*float(self.CONFIG.cluster.min_spikes)/len(kdist))
threshold = max(np.median(median_distances), min_threshold)
idx_keep = kdist <= threshold
self.triage_value = 1.0 - idx_keep.sum()/idx_keep.size
if np.sum(idx_keep) < self.min_spikes:
raise ValueError("{} kept out of {}, min thresh: {}, actual threshold {}, max dist {}".format(idx_keep.sum(),idx_keep.size, min_threshold, threshold, np.max(kdist)))
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', '+str(np.round(self.triage_value*100))+'% triaged from adaptive knn triage')
return np.where(idx_keep)[0]
def recover_step(self, gen, vbParam, pca_wf_all):
# for post-deconv reclustering, we can safely cluster only 10k spikes or less
idx_recovered, vbParam = self.recover_spikes(vbParam, pca_wf_all)
if self.verbose:
print ("chan "+ str(self.channel)+', gen '+str(gen)+", recovered ",
str(idx_recovered.shape[0])+ " spikes")
return idx_recovered, vbParam
def recover_spikes(self, vbParam, pca, maha_dist=1):
N, D = pca.shape
# Cat: TODO: check if this maha thresholding recovering distance is good
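        # chi2.ppf(0.99, D) is the squared 99th-percentile Mahalanobis radius
        # of a D-dimensional Gaussian; spikes farther than this from every
        # cluster are not recovered.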
threshold = np.sqrt(chi2.ppf(0.99, D))
# update rhat on full data
maskedData = mfm.maskData(pca[:,:,np.newaxis], np.ones([N, 1]), np.arange(N))
vbParam.update_local(maskedData)
# calculate mahalanobis distance
maha = mfm.calc_mahalonobis(vbParam, pca[:,:,np.newaxis])
idx_recovered = np.where(~np.all(maha >= threshold, axis=1))[0]
vbParam.rhat = vbParam.rhat[idx_recovered]
# zero out low assignment vals
if True:
vbParam.rhat[vbParam.rhat < self.assignment_delete_threshold] = 0
vbParam.rhat = vbParam.rhat/np.sum(vbParam.rhat,
1, keepdims=True)
return idx_recovered, vbParam
def calculate_stability(self, rhat):
K = rhat.shape[1]
mask = rhat > 0.05
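        # Per-cluster stability: mean soft-assignment probability over spikes
        # with non-negligible responsibility (>0.05) for that cluster; values
        # near 1 indicate unambiguous assignments.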
stability = np.zeros(K)
for clust in range(stability.size):
if mask[:,clust].sum() == 0.0:
continue
stability[clust] = np.average(mask[:,clust] * rhat[:,clust], axis = 0, weights = mask[:,clust])
return stability
def get_k_cc(self, maha, maha_thresh_min, k_target):
# it assumes that maha_thresh_min gives
# at least k+1 number of connected components
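        # Strategy: scan the threshold upward in unit steps until the number
        # of strongly connected components reaches or brackets k_target, then
        # bisect within the bracket.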
k_now = k_target + 1
if len(self.get_cc(maha, maha_thresh_min)) != k_now:
raise ValueError("something is not right")
maha_thresh = maha_thresh_min
while k_now > k_target:
maha_thresh += 1
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
else:
maha_thresh_max = maha_thresh
maha_thresh_min = maha_thresh - 1
if len(self.get_cc(maha, maha_thresh_min)) <= k_target:
raise ValueError("something is not right")
ctr = 0
maha_thresh_max_init = maha_thresh_max
while True:
ctr += 1
maha_thresh = (maha_thresh_max + maha_thresh_min)/2.0
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
elif k_now > k_target:
maha_thresh_min = maha_thresh
elif k_now < k_target:
maha_thresh_max = maha_thresh
if ctr > 1000:
print(k_now, k_target, maha_thresh, maha_thresh_max_init)
print(cc)
print(len(self.get_cc(maha, maha_thresh+0.001)))
print(len(self.get_cc(maha, maha_thresh-0.001)))
raise ValueError("something is not right")
def get_cc(self, maha, maha_thresh):
row, column = np.where(maha<maha_thresh)
G = nx.DiGraph()
for i in range(maha.shape[0]):
G.add_node(i)
for i, j in zip(row,column):
G.add_edge(i, j)
cc = [list(units) for units in nx.strongly_connected_components(G)]
return cc
def cluster_annealing(self, vbParam):
N, K = vbParam.rhat.shape
stability = self.calculate_stability(vbParam.rhat)
        if (K == 2) or np.all(stability > 0.8):
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import division, print_function
import ast
from copy import copy
from collections import OrderedDict
from math import cos, sin
import numpy as np
from six import string_types
import warnings
from ase.geometry import cellpar_to_cell, complete_cell, get_distances
from matplotlib.colors import rgb2hex
from scipy.interpolate import interp1d
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.sparse_list import SparseArray, SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement, ElementColorDictionary
from pyiron.base.settings.generic import Settings
from scipy.spatial import cKDTree, Voronoi
try:
import spglib
except ImportError:
try:
import pyspglib as spglib
except ImportError:
raise ImportError("The spglib package needs to be installed")
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class Atoms(object):
"""
The Atoms class represents all the information required to describe a structure at the atomic scale. This class is
written in such a way that is compatible with the `ASE atoms class`_. Some of the functions in this module is based
on the corresponding implementation in the ASE package
Args:
elements (list/numpy.ndarray): List of strings containing the elements or a list of
atomistics.structure.periodic_table.ChemicalElement instances
numbers (list/numpy.ndarray): List of atomic numbers of elements
symbols (list/numpy.ndarray): List of chemical symbols
positions (list/numpy.ndarray): List of positions
scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates)
pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes
cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure
Note: Only one of elements/symbols or numbers should be assigned during initialization
Attributes:
indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms
.. _ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html
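    Example (a minimal sketch; exact constructor argument handling may differ)::

        import numpy as np
        basis = Atoms(elements=['Fe', 'Fe'],
                      positions=[[0., 0., 0.], [1.4, 1.4, 1.4]],
                      cell=2.8 * np.eye(3))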
"""
def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None,
magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None,
calculator=None, info=None, indices=None, elements=None, dimension=None, species=None,
**qwargs):
if symbols is not None:
if elements is None:
elements = symbols
else:
raise ValueError("Only elements OR symbols should be given.")
if tags is not None or momenta is not None or masses is not None or charges is not None \
or celldisp is not None or constraint is not None or calculator is not None or info is not None:
s.logger.debug('Not supported parameter used!')
self._store_elements = dict()
self._species_to_index_dict = None
self.colorLut = ElementColorDictionary().to_lut()
self._is_scaled = False
if cell is not None:
# make it ASE compatible
if np.linalg.matrix_rank(cell) == 1:
cell = np.eye(len(cell)) * cell
else:
cell = np.array(cell)
self._cell = cell
self._species = list()
self.positions= None
self._pse = PeriodicTable()
self._tag_list = SparseArray()
self.indices = np.array([])
self._info = dict()
self.arrays = dict()
self.adsorbate_info = {}
self.bonds = None
self._pbc = False
self.dimension = 3 # Default
self.units = {"length": "A", "mass": "u"}
el_index_lst = list()
element_list = None
if (elements is None) and (numbers is None) and (indices is None):
return
if numbers is not None: # for ASE compatibility
if not (elements is None):
raise AssertionError()
elements = self.numbers_to_elements(numbers)
if elements is not None:
el_object_list = None
if isinstance(elements, str):
element_list = self.convert_formula(elements)
elif isinstance(elements, (list, tuple, np.ndarray)):
if not all([isinstance(el, elements[0].__class__) for el in elements]):
object_list = list()
for el in elements:
                        if isinstance(el, (str, np.str_)):  # np.str was removed from numpy; np.str_ covers numpy strings
object_list.append(self.convert_element(el))
if isinstance(el, ChemicalElement):
object_list.append(el)
if isinstance(el, Atom):
object_list.append(el.element)
if isinstance(el, (int, np.integer)):
# pse = PeriodicTable()
object_list.append(self._pse.element(el))
el_object_list = object_list
if len(elements) == 0:
element_list = elements
else:
if isinstance(elements[0], (list, tuple, np.ndarray)):
elements = np.array(elements).flatten()
if isinstance(elements[0], string_types):
element_list = elements
elif isinstance(elements[0], ChemicalElement):
el_object_list = elements
elif isinstance(elements[0], Atom):
el_object_list = [el.element for el in elements]
positions = [el.position for el in elements]
elif elements.dtype in [int, np.integer]:
el_object_list = self.numbers_to_elements(elements)
else:
raise ValueError('Unknown static type for element in list: ' + str(type(elements[0])))
if el_object_list is None:
el_object_list = [self.convert_element(el) for el in element_list]
self.set_species(list(set(el_object_list)))
# species_to_index_dict = {el: i for i, el in enumerate(self.species)}
el_index_lst = [self._species_to_index_dict[el] for el in el_object_list]
elif indices is not None:
el_index_lst = indices
self.set_species(species)
if scaled_positions is not None:
if positions is not None:
raise ValueError("either position or scaled_positions can be given")
if cell is None:
raise ValueError('scaled_positions can only be used with a given cell')
positions = np.dot(np.array(cell).T, np.array(scaled_positions).T).T
if positions is None:
self.dimension = 3
if cell is not None:
positions = np.zeros((len(el_index_lst), self.dimension))
self.indices = np.array(el_index_lst)
        self.positions = np.array(positions).astype(np.float64)  # the np.float alias was removed from numpy
self._tag_list._length = len(positions)
for key, val in qwargs.items():
print('set qwargs (ASE): ', key, val)
setattr(self, key, val)
if len(positions) > 0:
self.dimension = len(positions[0])
else:
self.dimension = 3
if dimension is not None:
self.dimension = dimension
if cell is not None:
if pbc is None:
self.pbc = True # default setting
else:
self.pbc = pbc
self.set_initial_magnetic_moments(magmoms)
@property
def cell(self):
"""
numpy.ndarray: A size 3x3 array which gives the lattice vectors of the cell as [a1, a2, a3]
"""
return self._cell
@cell.setter
def cell(self, value):
if value is None:
self._cell = None
else:
if self._is_scaled:
self.set_cell(value, scale_atoms=True)
else:
self.set_cell(value)
@property
def species(self):
"""
list: A list of atomistics.structure.periodic_table.ChemicalElement instances
"""
return self._species
# @species.setter
def set_species(self, value):
"""
Setting the species list
Args:
value (list): A list atomistics.structure.periodic_table.ChemicalElement instances
"""
if value is None:
return
value = list(value)
self._species_to_index_dict = {el: i for i, el in enumerate(value)}
self._species = value[:]
self._store_elements = {el.Abbreviation: el for el in value}
@property
def info(self):
"""
dict: This dictionary is merely used to be compatible with the ASE Atoms class.
"""
return self._info
@info.setter
def info(self, val):
self._info = val
@property
def pbc(self):
"""
            list: A list of boolean values which gives the periodic boundary conditions along the three axes.
The default value is [True, True, True]
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return self._pbc
@pbc.setter
def pbc(self, val):
self._pbc = val
@property
def elements(self):
"""
numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according
to the ordering of the atoms in the instance
"""
return np.array([self.species[el] for el in self.indices])
def new_array(self, name, a, dtype=None, shape=None):
"""
Adding a new array to the instance. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
            shape (list/tuple): Shape of the array
"""
if dtype is not None:
a = np.array(a, dtype, order='C')
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags['C_CONTIGUOUS']:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""
Get an array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the required array
copy (bool): True if a copy of the array is to be returned
Returns:
An array of a copy of the array
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""
Update array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
shape (list/tuple): Shape of the array
"""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def add_tag(self, *args, **qwargs):
"""
Add tags to the atoms object.
Examples:
For selective dynamics::
>>> self.add_tag(selective_dynamics=[False, False, False])
"""
self._tag_list.add_tag(*args, **qwargs)
# @staticmethod
def numbers_to_elements(self, numbers):
"""
Convert atomic numbers into element objects (needed for compatibility with ASE)
Args:
numbers (list): List of Element Numbers (as Integers; default in ASE)
Returns:
list: A list of elements as needed for pyiron
"""
# pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags
atom_number_to_element = {}
for i_el in set(numbers):
i_el = int(i_el)
atom_number_to_element[i_el] = self._pse.element(i_el)
return [atom_number_to_element[i_el] for i_el in numbers]
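# Illustrative sketch (assumed example, not from the original source):
#     >>> self.numbers_to_elements([26, 26, 6])   # -> the element objects [Fe, Fe, C]
# The lookup is cached per distinct atomic number before the full list is expanded.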
def copy(self):
"""
Returns a copy of the instance
Returns:
pyiron.atomistics.structure.atoms.Atoms: A copy of the instance
"""
return self.__copy__()
def to_hdf(self, hdf, group_name="structure"):
"""
Save the object in a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str):
Group name with which the object should be stored. This same name should be used to retrieve the object
"""
# import time
with hdf.open(group_name) as hdf_structure:
# time_start = time.time()
hdf_structure["TYPE"] = str(type(self))
for el in self.species:
if isinstance(el.tags, dict):
with hdf_structure.open("new_species") as hdf_species:
el.to_hdf(hdf_species)
hdf_structure['species'] = [el.Abbreviation for el in self.species]
hdf_structure["indices"] = self.indices
with hdf_structure.open("tags") as hdf_tags:
for tag in self._tag_list.keys():
tag_value = self._tag_list[tag]
if isinstance(tag_value, SparseList):
tag_value.to_hdf(hdf_tags, tag)
hdf_structure["units"] = self.units
hdf_structure["dimension"] = self.dimension
if self.cell is not None:
with hdf_structure.open("cell") as hdf_cell:
hdf_cell["cell"] = self.cell
hdf_cell["pbc"] = self.pbc
# hdf_structure["coordinates"] = self.positions # "Atomic coordinates"
hdf_structure["positions"] = self.positions # "Atomic coordinates"
# potentials with explicit bonds (TIP3P, harmonic, etc.)
if self.bonds is not None:
hdf_structure["explicit_bonds"] = self.bonds
# print ('time in atoms.to_hdf: ', time.time() - time_start)
def from_hdf(self, hdf, group_name="structure"):
"""
Retrieve the object from a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path from which the object is to be loaded
group_name (str): Group name from which the Atoms object is retrieved.
Returns:
pyiron.atomistics.structure.atoms.Atoms: The retrieved atoms class
"""
if "indices" in hdf[group_name].list_nodes():
with hdf.open(group_name) as hdf_atoms:
if "new_species" in hdf_atoms.list_groups():
with hdf_atoms.open("new_species") as hdf_species:
self._pse.from_hdf(hdf_species)
el_object_list = [self.convert_element(el, self._pse) for el in hdf_atoms["species"]]
self.indices = hdf_atoms["indices"]
self._tag_list._length = len(self)
self.set_species(el_object_list)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
self.units = hdf_atoms["units"]
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
# Backward compatibility
position_tag = "positions"
if position_tag not in hdf_atoms.list_nodes():
position_tag = "coordinates"
if "is_absolute" in hdf_atoms.list_nodes():
if not tr_dict[hdf_atoms["is_absolute"]]:
self.set_scaled_positions(hdf_atoms[position_tag])
else:
self.positions = hdf_atoms[position_tag]
else:
self.positions = hdf_atoms[position_tag]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
else:
return self._from_hdf_old(hdf, group_name)
def _from_hdf_old(self, hdf, group_name="structure"):
"""
This function exists merely for the purpose of backward compatibility
"""
with hdf.open(group_name) as hdf_atoms:
self._pse = PeriodicTable()
if "species" in hdf_atoms.list_groups():
with hdf_atoms.open("species") as hdf_species:
self._pse.from_hdf(hdf_species)
chemical_symbols = np.array(hdf_atoms["elements"], dtype=str)
el_object_list = [self.convert_element(el, self._pse) for el in chemical_symbols]
self.set_species(list(set(el_object_list)))
self.indices = [self._species_to_index_dict[el] for el in el_object_list]
self._tag_list._length = len(self)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]:
self.positions = hdf_atoms["coordinates"]
else:
self.set_scaled_positions(hdf_atoms["coordinates"])
self.units = hdf_atoms["units"]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
def center(self, vacuum=None, axis=(0, 1, 2)):
"""
Center atoms in unit cell.
Adapted from ASE code (https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.center)
Args:
vacuum (float): If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be
10 Angstrom of vacuum on each side.
axis (tuple/list): List or tuple of integers specifying the axes along which the atoms should be centered
"""
# Find the orientations of the faces of the unit cell
c = self.cell
if c is None:
c = np.identity(self.dimension)
self.cell = c
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.linalg.norm(dirs[i]) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.positions
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.linalg.norm(c[i])
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self.cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.positions += translation
if self.pbc is None:
self.pbc = self.dimension * [True]
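# Illustrative usage sketch (assumed example, not from the original source):
#     >>> slab.center(vacuum=10.0, axis=2)   # add 10 A of vacuum along z only
# With vacuum=None the cell size is kept fixed and the atoms are merely shifted
# to the middle of the cell along the selected axes.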
def set_positions(self, positions):
"""
Set positions. This function is for compatibility with ASE
Args:
positions (numpy.ndarray/list): Positions in absolute coordinates
"""
self.positions = np.array(positions)
self._tag_list._length = len(self)
def get_positions(self):
"""
Get positions. This function is for compatibility with ASE
Returns:
numpy.ndarray: Positions in absolute coordinates
"""
return self.positions
def select_index(self, el):
"""
Returns the indices of a given element in the structure
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement/list): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
if isinstance(el, str):
return np.where(self.get_chemical_symbols()==el)[0]
elif isinstance(el, ChemicalElement):
return np.where([e==el for e in self.get_chemical_elements()])[0]
if isinstance(el, (list, np.ndarray)):
if isinstance(el[0], str):
return np.where(np.isin(self.get_chemical_symbols(), el))[0]
elif isinstance(el[0], ChemicalElement):
return np.where([e in el for e in self.get_chemical_elements()])[0]
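# Illustrative sketch (assumed example): for a structure with symbols ['Fe', 'Fe', 'C'],
#     >>> basis.select_index('C')           # -> array([2])
#     >>> basis.select_index(['Fe', 'C'])   # a list selects all matching species -> array([0, 1, 2])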
def select_parent_index(self, el):
"""
Returns the indices of a given element in the structure ignoring user defined elements
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
parent_basis = self.get_parent_basis()
return parent_basis.select_index(el)
def get_tags(self):
"""
Returns the keys of the stored tags of the structure
Returns:
dict_keys: Keys of the stored tags
"""
return self._tag_list.keys()
def get_pbc(self):
"""
Returns a boolean array of the periodic boundary conditions along the x, y and z axes respectively
Returns:
numpy.ndarray: Boolean array of length 3
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return np.array(self._pbc, bool)
def set_pbc(self, value):
"""
Sets the periodic boundary conditions along all three axes
Args:
value (numpy.ndarray/list): An array of bool type with length 3
"""
if value is None:
self._pbc = None
else:
if isinstance(value, np.ndarray):
self._pbc = value
elif value in (True, False):
value = self.dimension * [value]
if not (np.shape(np.array(value)) == (self.dimension,)):
raise AssertionError()
self._pbc = np.array(value, bool)
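# Illustrative sketch (assumed example): a slab periodic in x and y but open in z,
#     >>> basis.set_pbc([True, True, False])
#     >>> basis.get_pbc()   # -> array([ True,  True, False])
# A single bool is broadcast to all self.dimension axes before validation.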
def convert_element(self, el, pse=None):
"""
Convert a string or an atom instance into a ChemicalElement instance
Args:
el (str/atomistics.structure.atom.Atom): String or atom instance from which the element should
be generated
pse (atomistics.structure.periodictable.PeriodicTable): PeriodicTable instance from which the element
is generated (optional)
Returns:
atomistics.structure.periodictable.ChemicalElement: The required chemical element
"""
if el in list(self._store_elements.keys()):
return self._store_elements[el]
if isinstance(el, string_types): # as symbol
element = Atom(el, pse=pse).element
elif isinstance(el, Atom):
element = el.element
el = el.element.Abbreviation
elif isinstance(el, ChemicalElement):
element = el
el = el.Abbreviation
else:
raise ValueError('Unknown static type to specify an element')
self._store_elements[el] = element
if hasattr(self, 'species'):
if element not in self.species:
self._species.append(element)
self.set_species(self._species)
return element
def get_chemical_formula(self):
"""
Returns the chemical formula of structure
Returns:
str: The chemical formula as a string
"""
species = self.get_number_species_atoms()
formula = ""
for string_sym, num in species.items():
if num == 1:
formula += str(string_sym)
else:
formula += str(string_sym) + str(num)
return formula
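# Illustrative sketch (assumed example): for 4 Al atoms and 1 Ni atom,
# get_number_species_atoms() yields OrderedDict([('Al', 4), ('Ni', 1)]), so
# get_chemical_formula() returns 'Al4Ni' (a count of 1 is omitted from the formula).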
def get_chemical_indices(self):
"""
Returns the list of chemical indices as ordered in self.species
Returns:
numpy.ndarray: A list of chemical indices
"""
return self.indices
def get_atomic_numbers(self):
"""
Returns the atomic numbers of all the atoms in the structure
Returns:
numpy.ndarray: A list of atomic numbers
"""
el_lst = [el.AtomicNumber for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_chemical_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure
Returns:
numpy.ndarray: A list of chemical symbols
"""
el_lst = [el.Abbreviation for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_parent_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure even for user defined elements
Returns:
numpy.ndarray: A list of chemical symbols
"""
sp_parent_list = list()
for sp in self.species:
if isinstance(sp.Parent, (float, type(None))):  # np.float is a deprecated alias of the builtin float
sp_parent_list.append(sp.Abbreviation)
else:
sp_parent_list.append(sp.Parent)
return np.array([sp_parent_list[i] for i in self.indices])
def get_parent_basis(self):
"""
Returns the basis with all user-defined/special elements replaced by their parent elements
Returns:
pyiron.atomistics.structure.atoms.Atoms: Structure without any user defined elements
"""
parent_basis = copy(self)
new_species = np.array(parent_basis.species)
for i, sp in enumerate(new_species):
if not isinstance(sp.Parent, (float, type(None))):
pse = PeriodicTable()
new_species[i] = pse.element(sp.Parent)
sym_list = [el.Abbreviation for el in new_species]
if len(sym_list) != len(np.unique(sym_list)):
uni, ind, inv_ind = np.unique(sym_list, return_index=True, return_inverse=True)
new_species = new_species[ind].copy()
parent_basis.set_species(list(new_species))
indices_copy = parent_basis.indices.copy()
for i, ind_ind in enumerate(inv_ind):
indices_copy[parent_basis.indices == i] = ind_ind
parent_basis.indices = indices_copy
return parent_basis
parent_basis.set_species(list(new_species))
return parent_basis
def get_chemical_elements(self):
"""
Returns the list of chemical element instances
Returns:
numpy.ndarray: A list of chemical element instances
"""
return self.elements
def get_number_species_atoms(self):
"""
Returns a dictionary with the species in the structure and the corresponding count in the structure
Returns:
collections.OrderedDict: An ordered dictionary with the species and the corresponding count
"""
count = OrderedDict()
# print "sorted: ", sorted(set(self.elements))
for el in sorted(set(self.get_chemical_symbols())):
count[el] = 0
for el in self.get_chemical_symbols():
count[el] += 1
return count
def get_species_symbols(self):
"""
Returns the symbols of the present species
Returns:
numpy.ndarray: List of the symbols of the species
"""
return np.array(sorted([el.Abbreviation for el in self.species]))
def get_species_objects(self):
"""
Returns:
"""
el_set = self.species
el_sym_lst = {el.Abbreviation: i for i, el in enumerate(el_set)}
el_sorted = self.get_species_symbols()
return [el_set[el_sym_lst[el]] for el in el_sorted]
def get_number_of_species(self):
"""
Returns:
"""
return len(self.species)
def get_number_of_degrees_of_freedom(self):
"""
Returns:
"""
return len(self) * self.dimension
def get_center_of_mass(self):
"""
Returns:
numpy.ndarray: The center of mass in absolute coordinates (in A)
"""
masses = self.get_masses()
return np.einsum('i,ij->j', masses, self.positions)/np.sum(masses)
def get_masses(self):
"""
Returns:
"""
el_lst = [el.AtomicMass for el in self.species]
return [el_lst[el] for el in self.indices]
def get_masses_dof(self):
"""
Returns:
"""
dim = self.dimension
return np.repeat(self.get_masses(), dim)
def get_volume(self, per_atom=False):
"""
Args:
per_atom (bool): True if volume per atom is to be returned
Returns:
volume (float): Volume in A**3
"""
if per_atom:
return np.abs(np.linalg.det(self.cell))/len(self)
else:
return np.abs(np.linalg.det(self.cell))
def get_density(self):
"""
Returns the density in g/cm^3
Returns:
float: Density of the structure
"""
# conv_factor = Ang3_to_cm3/scipi.constants.Avogadro
# with Ang3_to_cm3 = 1e24
conv_factor = 1.660539040427164
return conv_factor * np.sum(self.get_masses()) / self.get_volume()
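# Worked example (illustrative, rounded values): bcc Fe with a = 2.87 A contains
# 2 atoms of mass 55.845 u in a cell of volume 2.87**3 = 23.64 A**3, so
# get_density() returns 1.6605 * 2 * 55.845 / 23.64 = 7.85 g/cm^3.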
def get_scaled_positions(self, wrap=True):
"""
Returns:
"""
pbc = np.array(self.pbc)
positions = np.einsum('jk,ij->ik', np.linalg.inv(self.cell), self.positions)
if wrap:
positions[:, pbc] = np.mod(positions[:, pbc], 1.)
return positions
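# Illustrative sketch (assumed example): absolute and scaled positions are related
# by the cell matrix in the row-vector convention used here,
#     >>> scaled = basis.get_scaled_positions(wrap=False)
#     >>> np.allclose(basis.positions, np.dot(scaled, basis.cell))   # -> True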
def get_number_of_atoms(self):
"""
Returns:
"""
# assert(len(self) == np.sum(self.get_number_species_atoms().values()))
return len(self)
def set_absolute(self):
if self._is_scaled:
self._is_scaled = False
def set_relative(self):
if not self._is_scaled:
self._is_scaled = True
def center_coordinates_in_unit_cell(self, origin=0, eps=1e-4):
"""
Compact the atomic coordinates into the supercell as given by a1, a2, a3
Args:
origin: 0 to confine between 0 and 1, -0.5 to confine between -0.5 and 0.5
eps:
Returns:
"""
self.set_scaled_positions(np.mod(self.get_scaled_positions(wrap=False) + eps, 1) - eps + origin)
return self
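# Illustrative sketch (assumed example): wrap all atoms back into the cell,
#     >>> basis.center_coordinates_in_unit_cell()             # scaled coordinates in [0, 1)
#     >>> basis.center_coordinates_in_unit_cell(origin=-0.5)  # scaled coordinates in [-0.5, 0.5)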
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
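# Illustrative sketch (assumed example): a 2x2x2 supercell has 8 times the atoms,
#     >>> supercell = basis.repeat([2, 2, 2])
#     >>> len(supercell) == 8 * len(basis)   # -> True
# A single integer r is shorthand for (r, r, r), so basis.repeat(2) is equivalent.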
def set_repeat(self, vec):
self *= vec
def reset_absolute(self, is_absolute):
raise NotImplementedError('This function was removed!')
def analyse_ovito_cna_adaptive(self, mode='total'):
from pyiron.atomistics.structure.ovito import analyse_ovito_cna_adaptive
warnings.filterwarnings("ignore")
return analyse_ovito_cna_adaptive(atoms=self, mode=mode)
def analyse_ovito_centro_symmetry(atoms, num_neighbors=12):
from pyiron.atomistics.structure.ovito import analyse_ovito_centro_symmetry
warnings.filterwarnings("ignore")
return analyse_ovito_centro_symmetry(atoms, num_neighbors=num_neighbors)
def analyse_ovito_voronoi_volume(atoms):
from pyiron.atomistics.structure.ovito import analyse_ovito_voronoi_volume
warnings.filterwarnings("module")
return analyse_ovito_voronoi_volume(atoms)
def analyse_phonopy_equivalent_atoms(atoms):
from pyiron.atomistics.structure.phonopy import analyse_phonopy_equivalent_atoms
#warnings.filterwarnings("ignore")
warnings.warn("analyse_phonopy_equivalent_atoms() is obsolete use get_symmetry()['equivalent_atoms'] instead")
return analyse_phonopy_equivalent_atoms(atoms)
@staticmethod
def _ngl_write_cell(a1, a2, a3, f1=90, f2=90, f3=90):
"""
Writes a PDB-formatted line to represent the simulation cell.
Args:
a1, a2, a3 (float): Lengths of the cell vectors.
f1, f2, f3 (float): The cell angles alpha, beta and gamma between the cell vectors (in degrees).
Returns:
(str): The line defining the cell in PDB format.
"""
return 'CRYST1 {:8.3f} {:8.3f} {:8.3f} {:6.2f} {:6.2f} {:6.2f} P 1\n'.format(a1, a2, a3, f1, f2, f3)
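# Illustrative sketch (assumed example): for a cubic 4 A cell,
#     >>> Atoms._ngl_write_cell(4.0, 4.0, 4.0)
#     'CRYST1    4.000    4.000    4.000  90.00  90.00  90.00 P 1\n'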
@staticmethod
def _ngl_write_atom(num, species, x, y, z, group=None, num2=None, occupancy=1., temperature_factor=0.):
"""
Writes a PDB-formatted line to represent an atom.
Args:
num (int): Atomic index.
species (str): Elemental species.
x, y, z (float): Cartesian coordinates of the atom.
group (str): The PDB group name. (Default is None, which repeats the elemental species.)
num2 (int): An alternate atom index. (Default is None, which repeats the first number.)
occupancy (float): PDB occupancy parameter. (Default is 1.)
temperature_factor (float): PDB temperature factor parameter. (Default is 0.)
Returns:
(str): The line defining an atom in PDB format
Warnings:
* The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that
the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour,
this might be a good place to investigate.
"""
if group is None:
group = species
if num2 is None:
num2 = num
return 'ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n'.format(
num, species, group, num2, x, y, z, occupancy, temperature_factor, species)
def _ngl_write_structure(self, elements, positions, cell):
"""
Turns structure information into a NGLView-readable protein-database-formatted string.
Args:
elements (numpy.ndarray/list): Element symbol for each atom.
positions (numpy.ndarray/list): Vector of Cartesian atom positions.
cell (numpy.ndarray/list): Simulation cell Bravais matrix.
Returns:
(str): The PDB-formatted representation of the structure.
"""
from ase.geometry import cell_to_cellpar, cellpar_to_cell
cellpar = cell_to_cellpar(cell)
exportedcell = cellpar_to_cell(cellpar)
rotation = np.linalg.solve(cell, exportedcell)
pdb_str = self._ngl_write_cell(*cellpar)
pdb_str += 'MODEL 1\n'
if rotation is not None:
positions = np.array(positions).dot(rotation)
for i, p in enumerate(positions):
pdb_str += self._ngl_write_atom(i, elements[i], *p)
pdb_str += 'ENDMDL \n'
return pdb_str
def _atomic_number_to_radius(self, atomic_number, shift=0.2, slope=0.1, scale=1.0):
"""
Give the atomic radius for plotting, which scales like the root of the atomic number.
Args:
atomic_number (int/float): The atomic number.
shift (float): A constant addition to the radius. (Default is 0.2.)
slope (float): A multiplier for the root of the atomic number. (Default is 0.1)
scale (float): An overall scale factor for the radius. (Default is 1.0.)
Returns:
(float): The radius. (Not physical, just for visualization!)
"""
return (shift + slope * np.sqrt(atomic_number)) * scale
def _add_colorscheme_spacefill(self, view, elements, atomic_numbers, particle_size, scheme='element'):
"""
Set NGLView spacefill parameters according to a color-scheme.
Args:
view (NGLWidget): The widget to work on.
elements (numpy.ndarray/list): Elemental symbols.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
scheme (str): The scheme to use. (Default is "element".)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The modified widget.
"""
for elem, num in set(list(zip(elements, atomic_numbers))):
view.add_spacefill(selection='#' + elem,
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color_scheme=scheme)
return view
def _add_custom_color_spacefill(self, view, atomic_numbers, particle_size, colors):
"""
Set NGLView spacefill parameters according to per-atom colors.
Args:
view (NGLWidget): The widget to work on.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
colors (numpy.ndarray/list): A per-atom list of HTML or hex color codes.
Returns:
(nglview.NGLWidget): The modified widget.
"""
for n, num in enumerate(atomic_numbers):
view.add_spacefill(selection=[n],
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color=colors[n])
return view
@staticmethod
def _scalars_to_hex_colors(scalar_field, start=None, end=None, cmap=None):
"""
Convert scalar values to hex codes using a colormap.
Args:
scalar_field (numpy.ndarray/list): Scalars to convert.
start (float): Scalar value to map to the bottom of the colormap (values below are clipped). (Default is
None, use the minimal scalar value.)
end (float): Scalar value to map to the top of the colormap (values above are clipped). (Default is
None, use the maximal scalar value.)
cmap (matplotlib.cm): The colormap to use. (Default is None, which gives a blue-red divergent map.)
Returns:
(list): The corresponding hex codes for each scalar value passed in.
"""
if start is None:
start = np.amin(scalar_field)
if end is None:
end = np.amax(scalar_field)
interp = interp1d([start, end], [0, 1])
remapped_field = interp(np.clip(scalar_field, start, end)) # Map field onto [0,1]
if cmap is None:
try:
from seaborn import diverging_palette
except ImportError:
print("The package seaborn needs to be installed for the plot3d() function!")
cmap = diverging_palette(245, 15, as_cmap=True) # A nice blue-red palette
return [rgb2hex(cmap(scalar)[:3]) for scalar in remapped_field] # The slice gets RGB but leaves alpha
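# Illustrative sketch (assumed example): color atoms by a per-atom scalar such as
# energies before handing the hex codes to plot3d,
#     >>> colors = basis._scalars_to_hex_colors(energies)
#     >>> basis.plot3d(colors=colors)
# Values outside [start, end] are clipped before being mapped through the colormap.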
def plot3d(self, show_cell=True, show_axes=True, camera='orthographic', spacefill=True, particle_size=1.0,
select_atoms=None, background='white', color_scheme=None, colors=None,
scalar_field=None, scalar_start=None, scalar_end=None, scalar_cmap=None,
vector_field=None, vector_color=None, custom_array=None, custom_3darray=None):
"""
Plot3d relies on NGLView to visualize atomic structures. Here, we construct a string in the "protein database"
("pdb") format, then turn it into an NGLView "structure". PDB is a white-space sensitive format, so the
string snippets are carefully formatted.
The final widget is returned. If it is assigned to a variable, the visualization is suppressed until that
variable is evaluated, and in the meantime more NGL operations can be applied to it to modify the visualization.
Args:
show_cell (bool): Whether or not to show the frame. (Default is True.)
show_axes (bool): Whether or not to show xyz axes. (Default is True.)
camera (str): 'perspective' or 'orthographic'. (Default is 'orthographic'.)
spacefill (bool): Whether to use a space-filling or ball-and-stick representation. (Default is True, use
space-filling atoms.)
particle_size (float): Size of the particles. (Default is 1.)
select_atoms (numpy.ndarray): Indices of atoms to show, either as integers or a boolean array mask.
(Default is None, show all atoms.)
background (str): Background color. (Default is 'white'.)
color_scheme (str): NGLView color scheme to use. (Default is None, color by element.)
colors (numpy.ndarray): A per-atom array of HTML color names or hex color codes to use for atomic colors.
(Default is None, use coloring scheme.)
scalar_field (numpy.ndarray): Color each atom according to the array value (Default is None, use coloring
scheme.)
scalar_start (float): The scalar value to be mapped onto the low end of the color map (lower values are
clipped). (Default is None, use the minimum value in `scalar_field`.)
scalar_end (float): The scalar value to be mapped onto the high end of the color map (higher values are
clipped). (Default is None, use the maximum value in `scalar_field`.)
scalar_cmap (matplotlib.cm): The colormap to use. (Default is None, giving a blue-red divergent map.)
vector_field (numpy.ndarray): Add vectors (3 values) originating at each atom. (Default is None, no
vectors.)
vector_color (numpy.ndarray): Colors for the vectors (only available with vector_field). (Default is None,
vectors are colored by their direction.)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The NGLView widget itself, which can be operated on further or viewed as-is.
Warnings:
* Many features only work with space-filling atoms (e.g. coloring by a scalar field).
* The colour interpretation of some hex codes is weird, e.g. 'green'.
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
if custom_array is not None:
warnings.warn('custom_array is deprecated. Use scalar_field instead', DeprecationWarning)
scalar_field = custom_array
if custom_3darray is not None:
warnings.warn('custom_3darray is deprecated. Use vector_field instead', DeprecationWarning)
vector_field = custom_3darray
parent_basis = self.get_parent_basis()
elements = parent_basis.get_chemical_symbols()
atomic_numbers = parent_basis.get_atomic_numbers()
positions = self.positions
# If `select_atoms` was given, visualize only a subset of the `parent_basis`
if select_atoms is not None:
select_atoms = np.array(select_atoms, dtype=int)
elements = elements[select_atoms]
atomic_numbers = atomic_numbers[select_atoms]
positions = positions[select_atoms]
if colors is not None:
colors = np.array(colors)
colors = colors[select_atoms]
if scalar_field is not None:
scalar_field = np.array(scalar_field)
scalar_field = scalar_field[select_atoms]
if vector_field is not None:
vector_field = np.array(vector_field)
vector_field = vector_field[select_atoms]
if vector_color is not None:
vector_color = np.array(vector_color)
vector_color = vector_color[select_atoms]
# Write the nglview protein-database-formatted string
struct = nglview.TextStructure(self._ngl_write_structure(elements, positions, self.cell))
# Parse the string into the displayable widget
view = nglview.NGLWidget(struct)
if spacefill:
# Color by scheme
if color_scheme is not None:
if colors is not None:
warnings.warn('`color_scheme` is overriding `colors`')
if scalar_field is not None:
warnings.warn('`color_scheme` is overriding `scalar_field`')
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size, color_scheme)
# Color by per-atom colors
elif colors is not None:
if scalar_field is not None:
warnings.warn('`colors` is overriding `scalar_field`')
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by per-atom scalars
elif scalar_field is not None: # Color by per-atom scalars
colors = self._scalars_to_hex_colors(scalar_field, scalar_start, scalar_end, scalar_cmap)
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by element
else:
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if vector_color is None and vector_field is not None:
vector_color = 0.5 * vector_field / np.linalg.norm(vector_field, axis=-1)[:, np.newaxis] + 0.5
elif vector_color is not None and vector_field is not None:  # normalize a user-supplied vector_color
try:
if vector_color.shape != np.ones((len(self), 3)).shape:
vector_color = np.outer(np.ones(len(self)), vector_color / np.linalg.norm(vector_color))
except AttributeError:
vector_color = np.ones((len(self), 3)) * vector_color
if vector_field is not None:
for arr, pos, col in zip(vector_field, positions, vector_color):
view.shape.add_arrow(list(pos), list(pos + arr), list(col), 0.2)
if show_axes: # Add axes
axes_origin = -np.ones(3)
arrow_radius = 0.1
text_size = 1
text_color = [0, 0, 0]
arrow_names = ['x', 'y', 'z']
for n in [0, 1, 2]:
start = list(axes_origin)
shift = np.zeros(3)
shift[n] = 1
end = list(start + shift)
color = list(shift)
# We cast as list to avoid JSON warnings
view.shape.add_arrow(start, end, color, arrow_radius)
view.shape.add_text(end, text_color, text_size, arrow_names[n])
if camera != 'perspective' and camera != 'orthographic':
warnings.warn('Only perspective or orthographic is (likely to be) permitted for camera')
view.camera = camera
view.background = background
return view
def plot3d_ase(self, spacefill=True, show_cell=True, camera='perspective', particle_size=0.5, background='white', color_scheme='element', show_axes=True):
"""
Possible color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
# Always visualize the parent basis
parent_basis = self.get_parent_basis()
view = nglview.show_ase(parent_basis)
if spacefill:
view.add_spacefill(radius_type='vdw', color_scheme=color_scheme, radius=particle_size)
# view.add_spacefill(radius=1.0)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if show_axes:
view.shape.add_arrow([-2, -2, -2], [2, -2, -2], [1, 0, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, 2, -2], [0, 1, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, -2, 2], [0, 0, 1], 0.5)
if camera!='perspective' and camera!='orthographic':
print('Only perspective or orthographic is permitted')
return None
view.camera = camera
view.background = background
return view
def pos_xyz(self):
"""
Returns:
"""
x = self.positions[:, 0]
y = self.positions[:, 1]
z = self.positions[:, 2]
return x, y, z
def scaled_pos_xyz(self):
"""
Returns:
"""
xyz = self.get_scaled_positions(wrap=False)
return xyz[:,0], xyz[:,1], xyz[:,2]
def __select_slice(self, i_dim, i_flag, dist):
"""
Args:
i_dim:
i_flag:
dist:
Returns:
"""
if i_dim + 1 > self.dimension:
return True
if i_flag == 1:
return self.get_scaled_positions(wrap=False)[:, i_dim] < dist
elif i_flag == 0:
return True
elif i_flag == -1:
return self.get_scaled_positions(wrap=False)[:, i_dim] > 1. - dist
def get_boundary_region(self, dist):
"""
get all atoms in the boundary around the supercell which have a distance
to the supercell boundary of less than dist
Args:
dist:
Returns:
"""
rel_coordinates = self.get_scaled_positions(wrap=False)
dim = self.dimension
cell = self.cell.T # to use same definition as ASE
a1 = cell[0]
a2, a3 = 0, 0
min_i, max_i = -1, 2
iyl, iy, izl, iz = 0, 1, 0, 1
if dim > 1:
a2 = cell[1]
iyl, iy = min_i, max_i
if dim > 2:
a3 = cell[2]
izl, iz = min_i, max_i
index = np.arange(len(self))
new_coordinates = np.zeros((1, dim))
# pbcVec = np.zeros((1, dim))
ia_list = np.zeros((1, 1), dtype=int)  # np.int is a deprecated alias of the builtin int
for i0 in range(min_i, max_i):
for i1 in range(iyl, iy):
for i2 in range(izl, iz):
# r_vec_abs = i0 * a1 + i1 * a2 + i2 * a3
r_vec = np.array([i0, i1, i2][:dim])
select = self.__select_slice(0, i0, dist) & self.__select_slice(1, i1, dist) & \
self.__select_slice(2, i2, dist)
if np.linalg.norm(r_vec) > 0:
if len(select) > 0:
sel_coordinates = rel_coordinates[select] + r_vec
new_coordinates = np.append(new_coordinates, sel_coordinates, axis=0)
if len(sel_coordinates) > 0:
# rVecs = np.array(len(sel_coordinates) * [r_vec_abs])
# pbcVec = np.append(pbcVec, rVecs, axis=0)
ia_list = np.append(ia_list, index[select])
# print "rVec: ", i0,i1,i2,rVecs[0],index[select],select
element_list = [self.indices[ia] for ia in ia_list[1:]]
self._ia_bounds = ia_list[1:]
# self._pbcVec = pbcVec[1:]
return Atoms(indices=element_list, scaled_positions=new_coordinates[1:], cell=self.cell,
dimension=len(cell), species=self.species)
def get_neighbors(self,
num_neighbors=12,
t_vec=True,
include_boundary=True,
exclude_self=True,
tolerance=2,
id_list=None,
cutoff_radius=None,
cutoff=None):
"""
Args:
num_neighbors (int): number of neighbors
t_vec (bool): True: compute distance vectors
(pbc are automatically taken into account)
include_boundary (bool): True: search for neighbors assuming periodic boundary conditions
False is needed e.g. in plot routines to avoid showing incorrect bonds
exclude_self (bool): True: exclude the central atom from the neighbor list (i.e. the entry with distance = 0)
tolerance (int): number of decimal places to which distances are rounded when computing neighbor shells
id_list:
cutoff (float/None): Deprecated alias for cutoff_radius - upper bound of the distance up to which the
search is performed; by default up to 100 neighbors are searched unless num_neighbors is set explicitly.
cutoff_radius (float/None): Upper bound of the distance up to which the search is performed; by default
up to 100 neighbors are searched unless num_neighbors is set explicitly.
Returns:
pyiron.atomistics.structure.atoms.Neighbors: Neighbors instances with the neighbor indices, distances
and vectors
"""
if cutoff is not None and cutoff_radius is None:
warnings.warn('Please use cutoff_radius, rather than cutoff', DeprecationWarning)
cutoff_radius = cutoff
if cutoff_radius is not None and num_neighbors == 12:
num_neighbors = 100
# eps = 1e-4
i_start = 0
if exclude_self:
i_start = 1
def f_ind(x):
return x < len(self)
num_neighbors += 1
neighbor_obj = Neighbors()
if not include_boundary: # periodic boundaries are NOT included
tree = cKDTree(self.positions)
if cutoff_radius is None:
neighbors = tree.query(self.positions, k=num_neighbors)
else:
neighbors = tree.query(self.positions, k=num_neighbors, distance_upper_bound=cutoff_radius)
d_lst, ind_lst, v_lst = [], [], []
ic = 0
for d_i, ind_i in zip(neighbors[0], neighbors[1]):
ff = (ind_i < len(self)) & (ind_i != ic)
ind_l = ind_i[ff]
ind_lst.append(ind_l)
d_lst.append(d_i[ff])
v_lst.append(self.positions[ind_l] - self.positions[ic])
ic += 1
neighbor_obj.indices = ind_lst
neighbor_obj.distances = d_lst
neighbor_obj.vecs = v_lst
return neighbor_obj
# include periodic boundaries
# translate radius in boundary layer with relative coordinates
# TODO: introduce a more rigorous definition
radius = 3 * num_neighbors ** (1. / 3.)
rel_width = [radius / np.sqrt(np.dot(a_i, a_i)) for a_i in self.cell]
rel_width_scalar = np.max(rel_width)
# construct cell with additional atoms bounding original cell
boundary_atoms = self.get_boundary_region(rel_width_scalar)
extended_cell = self + boundary_atoms
# build index to map boundary atoms back to original cell
map_to_cell = np.append(np.arange(len(self)), self._ia_bounds)
# transfer relative to absolute coordinates
tree = cKDTree(extended_cell.positions)
if id_list is None:
positions = self.positions
else:
positions = np.array([self.positions[i] for i in id_list])
# print ("len positions: ", len(positions))
if cutoff_radius is None:
neighbors = tree.query(positions, k=num_neighbors)
else:
neighbors = tree.query(positions, k=num_neighbors, distance_upper_bound=cutoff_radius)
# print ("neighbors: ", neighbors)
self.neighbor_distance = [] # neighbors[0]
self.neighbor_distance_vec = []
self.neighbor_index = []
self.neighbor_shellOrder = []
# tolerance = 2 # tolerance for round floating point
def f_ind_ext(x):
return x < len(extended_cell)
neighbor_index = map(lambda x: filter(f_ind_ext, x), neighbors[1])
num_neighbors_found = []  # number of neighbors actually found per atom (do not shadow num_neighbors)
for i, index in enumerate(neighbor_index):
# print "i, index: ", i, index
index = list(index) # Filter conversion for python 3 compatibility
nbrs_distances = neighbors[0][i][i_start:len(index)]
# if radius: # reduce neighborlist based on radius
# new_index_lst, new_dist_lst = [], []
# for index_red, dis_red in zip(index, nbrs_distances):
# if dis_red < radius:
# new_index_lst.append(index_red)
# new_dist_lst.append(dis_red)
# index, nbrs_distances= new_index_lst, new_dist_lst
self.neighbor_distance.append(nbrs_distances)
self.neighbor_index.append(map_to_cell[index][i_start:])
u, indices = np.unique(np.around(nbrs_distances, decimals=tolerance), return_inverse=True)
self.neighbor_shellOrder.append(indices + 1) # this gives the shellOrder of neighboring atoms back
if t_vec:
nbr_dist = []
if len(index) == 0:
self.neighbor_distance_vec.append(nbr_dist)
continue
vec0 = self.positions[index[0]]
for i_nbr, ind in enumerate(index[i_start:]):
# ind0 = map_to_cell[ind]
vec_r_ij = extended_cell.positions[ind] - vec0
dd0 = neighbors[0][i][i_nbr + i_start]
dd = np.sqrt(np.dot(vec_r_ij, vec_r_ij))
if not (dd - dd0 < 0.001):
raise AssertionError()
# if (dd - dd0 > 0.001):
# print "wrong: ", vec_r_ij, dd,dd0,i_nbr,ind,ind0,i
# print self.positions[ind0], extended_cell.positions[ind], vec0
nbr_dist.append(vec_r_ij)
self.neighbor_distance_vec.append(nbr_dist)
num_neighbors_found.append(len(index) - i_start)
min_nbr, max_nbr = min(num_neighbors_found), max(num_neighbors_found)
if max_nbr == num_neighbors - i_start:
# the search hit the requested limit, so the neighbor list is probably truncated
raise ValueError("Increase num_neighbors! " + str(max_nbr) + " " + str(num_neighbors_found))
self.min_nbr_number = min_nbr
self.max_nbr_number = max_nbr
neighbor_obj.distances = self.neighbor_distance
neighbor_obj.vecs = self.neighbor_distance_vec
neighbor_obj.indices = self.neighbor_index
neighbor_obj.shells = self.neighbor_shellOrder
return neighbor_obj
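# Illustrative sketch (assumed example): nearest-neighbor analysis with periodic boundaries,
#     >>> neigh = basis.get_neighbors(num_neighbors=8)
#     >>> neigh.distances[0]   # distances from atom 0 to its 8 nearest neighbors
#     >>> neigh.indices[0]     # the corresponding indices, mapped back to the original cell
#     >>> neigh.shells[0]      # neighbor-shell index (1 = first shell, 2 = second, ...)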
def get_neighborhood(box, position, num_neighbors=12, t_vec=True, include_boundary=True,
tolerance=2, id_list=None, cutoff=None, cutoff_radius=None):
"""
Args:
position: position in a box whose neighborhood information is analysed
num_neighbors:
t_vec (bool): True: compute distance vectors
(pbc are automatically taken into account)
include_boundary (bool): True: search for neighbors assuming periodic boundary conditions
False is needed e.g. in plot routines to avoid showing incorrect bonds
tolerance (int): tolerance (round decimal points) used for computing neighbor shells
id_list:
cutoff (float/None): Upper bound of the distance up to which the search is performed
cutoff_radius (float/None): Upper bound of the distance up to which the search is performed
Returns:
pyiron.atomistics.structure.atoms.Neighbors: Neighbors instances with the neighbor indices, distances
and vectors
"""
class NeighTemp(object):
pass
box = box.copy()
box += box[-1]
pos = box.positions
pos[-1] = np.array(position)
box.positions = pos
neigh = box.get_neighbors(num_neighbors=num_neighbors, t_vec=t_vec,
include_boundary=include_boundary, exclude_self=True,
tolerance=tolerance, id_list=id_list, cutoff=cutoff, cutoff_radius=cutoff_radius)
neigh_return = NeighTemp()
setattr(neigh_return, 'distances', neigh.distances[-1])
setattr(neigh_return, 'shells', neigh.shells[-1])
setattr(neigh_return, 'vecs', neigh.vecs[-1])
setattr(neigh_return, 'indices', neigh.indices[-1])
neigh_return.distances = neigh_return.distances[neigh_return.indices!=len(box)-1]
neigh_return.shells = neigh_return.shells[neigh_return.indices!=len(box)-1]
neigh_return.vecs = np.array(neigh_return.vecs)[neigh_return.indices!=len(box)-1]
neigh_return.indices = neigh_return.indices[neigh_return.indices!=len(box)-1]
return neigh_return
def get_shells(self, id_list=None, max_shell=2, max_num_neighbors=100):
"""
Args:
id_list:
max_shell:
max_num_neighbors:
Returns:
"""
if id_list is None:
id_list = [0]
neighbors = self.get_neighbors(num_neighbors=max_num_neighbors,
id_list=id_list)
shells = neighbors.shells[0]
dist = neighbors.distances[0]
shell_dict = {}
for i_shell in set(shells):
if i_shell > max_shell:
break
shell_dict[i_shell] = np.mean(dist[shells == i_shell])
# print ("shells: ", i_shell, shell_dict[i_shell])
if not (max(shell_dict.keys()) == max_shell):
raise AssertionError()
return shell_dict
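# Illustrative sketch (assumed example): for bcc Fe with a = 2.87 A,
#     >>> basis.get_shells(max_shell=2)   # -> approximately {1: 2.49, 2: 2.87}
# i.e. the first shell at a*sqrt(3)/2 and the second shell at a.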
def get_shell_matrix(self, shell, id_list=None, restraint_matrix=None, max_num_neighbors=100):
"""
Args:
shell (int): The shell number to evaluate (1 = first neighbor shell, 2 = second, ...)
id_list: cf. get_neighbors
max_num_neighbors: cf. get_neighbors
restraint_matrix: NxN matrix with True or False, where False will remove the entries.
If a list of two chemical symbols is given (e.g. ['Fe', 'C']), only pairs formed
by these two species are retained (in either order).
Returns:
NxN matrix with 1 for the pairs of atoms in the given shell
"""
assert isinstance(shell, int) and shell > 0, "Parameter 'shell' must be an integer greater than 0"
neigh_list = self.get_neighbors(num_neighbors=max_num_neighbors,
id_list=id_list)
Natom = len(neigh_list.shells)
if restraint_matrix is None:
restraint_matrix = (np.ones((Natom, Natom)) == 1)
elif type(restraint_matrix) == list and len(restraint_matrix) == 2:
restraint_matrix = np.outer(1 * (self.get_chemical_symbols() == restraint_matrix[0]),
1 * (self.get_chemical_symbols() == restraint_matrix[1]))
restraint_matrix = ((restraint_matrix + restraint_matrix.transpose()) > 0)
shell_matrix = np.zeros((Natom, Natom))
for ii, ss in enumerate(neigh_list.shells):
unique, counts = np.unique(neigh_list.indices[ii][ss == np.array(shell)], return_counts=True)
shell_matrix[ii][unique] = counts
shell_matrix[np.logical_not(restraint_matrix)] = 0
return shell_matrix
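# Illustrative sketch (assumed example): entry (i, j) counts how often atom j
# appears in the given shell of atom i,
#     >>> mat = basis.get_shell_matrix(shell=1)
#     >>> mat_fec = basis.get_shell_matrix(shell=1, restraint_matrix=['Fe', 'C'])   # Fe-C pairs only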
def get_shell_radius(self, shell=1, id_list=None):
"""
Args:
shell:
id_list:
Returns:
"""
if id_list is None:
id_list = [0]
shells = self.get_shells(id_list=id_list, max_shell=shell + 1)
return np.mean(list(shells.values())[shell - 1:])
def occupy_lattice(self, **qwargs):
"""
Replaces specified indices with a given species
"""
new_species = list(np.array(self.species).copy())
new_indices = np.array(self.indices.copy())
for key, i_list in qwargs.items():
el = self._pse.element(key)
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
new_species.append(el)
new_indices[i_list] = len(new_species) - 1
else:
index = np.argwhere(np.array(new_species) == el).flatten()
new_indices[i_list] = index
delete_species_indices = list()
retain_species_indices = list()
for i, el in enumerate(new_species):
if len(np.argwhere(new_indices == i).flatten()) == 0:
delete_species_indices.append(i)
else:
retain_species_indices.append(i)
for i in delete_species_indices:
new_indices[new_indices >= i] += -1
new_species = np.array(new_species)[retain_species_indices]
self.set_species(new_species)
self.indices = new_indices
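# Illustrative sketch (assumed example): substitute atoms 0 and 1 with Ni,
#     >>> basis.occupy_lattice(Ni=[0, 1])
# Species that no longer occur in self.indices afterwards are pruned from the
# species list and the remaining chemical indices are renumbered accordingly.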
def cluster_analysis(self, id_list, neighbors=None, radius=None, return_cluster_sizes=False):
"""
Args:
id_list:
neighbors:
radius:
return_cluster_sizes:
Returns:
"""
if neighbors is None:
if radius is None:
radius = self.get_shell_radius()
# print "radius: ", radius
neighbors = self.get_neighbors(cutoff_radius=radius, t_vec=False)
self._neighbor_index = neighbors.indices
self._cluster = [0] * len(self)
c_count = 1
# element_list = self.get_atomic_numbers()
for ia in id_list:
# el0 = element_list[ia]
nbrs = self._neighbor_index[ia]
# print ("nbrs: ", ia, nbrs)
if self._cluster[ia] == 0:
self._cluster[ia] = c_count
self.__probe_cluster(c_count, nbrs, id_list)
c_count += 1
cluster = np.array(self._cluster)
cluster_dict = {i_c: np.where(cluster == i_c)[0].tolist() for i_c in range(1, c_count)}
if return_cluster_sizes:
sizes = [self._cluster.count(i_c + 1) for i_c in range(c_count - 1)]
return cluster_dict, sizes
return cluster_dict # sizes
def __probe_cluster(self, c_count, neighbors, id_list):
"""
Args:
c_count:
neighbors:
id_list:
Returns:
"""
for nbr_id in neighbors:
if self._cluster[nbr_id] == 0:
if nbr_id in id_list: # TODO: check also for ordered structures
self._cluster[nbr_id] = c_count
nbrs = self._neighbor_index[nbr_id]
self.__probe_cluster(c_count, nbrs, id_list)
# TODO: combine with corresponding routine in plot3d
def get_bonds(self, radius=None, max_shells=None, prec=0.1, num_neighbors=20):
"""
Args:
radius:
max_shells:
prec: minimum distance between any two clusters (if smaller considered to be single cluster)
num_neighbors:
Returns:
"""
def get_cluster(dist_vec, ind_vec, prec=prec):
ind_where = np.where(np.diff(dist_vec) > prec)[0] + 1
ind_vec_cl = [np.sort(group) for group in np.split(ind_vec, ind_where)]
dist_vec_cl = [np.mean(group) for group in np.split(dist_vec, ind_where)]
return ind_vec_cl, dist_vec_cl
if radius is None:
radius = np.inf  # no explicit cutoff: keep all num_neighbors neighbors in the comparison below
neighbors = self.get_neighbors(cutoff_radius=radius,
num_neighbors=num_neighbors)
dist = neighbors.distances
ind = neighbors.indices
el_list = self.get_chemical_symbols()
ind_shell = []
for i_a, (d, i) in enumerate(zip(dist, ind)):
id_list, dist_lst = get_cluster(d[d < radius], i[d < radius])
# print ("id: ", d[d<radius], id_list, dist_lst)
ia_shells_dict = {}
for i_shell_list in id_list:
ia_shell_dict = {}
for i_s in i_shell_list:
el = el_list[i_s]
if el not in ia_shell_dict:
ia_shell_dict[el] = []
ia_shell_dict[el].append(i_s)
for el, ia_lst in ia_shell_dict.items():
if el not in ia_shells_dict:
ia_shells_dict[el] = []
if max_shells is not None:
if len(ia_shells_dict[el]) + 1 > max_shells:
continue
ia_shells_dict[el].append(ia_lst)
ind_shell.append(ia_shells_dict)
return ind_shell
# spglib calls
def get_symmetry(self, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
use_magmoms:
use_elements: True or False. If False, chemical elements will be ignored
symprec:
angle_tolerance:
Returns:
"""
lattice = np.array(self.get_cell().T, dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
if use_elements:
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
else:
numbers = np.ones_like(self.get_atomic_numbers(), dtype='intc')
if use_magmoms:
magmoms = self.get_initial_magnetic_moments()
return spglib.get_symmetry(cell=(lattice, positions, numbers, magmoms),
symprec=symprec,
angle_tolerance=angle_tolerance)
else:
return spglib.get_symmetry(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance)
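# Illustrative sketch (assumed example): the spglib dictionary returned here
# contains, among others, the symmetry operations and the atom orbits,
#     >>> sym = basis.get_symmetry()
#     >>> sym['equivalent_atoms']   # one representative index per orbit of equivalent atoms
#     >>> len(sym['rotations']) == len(sym['translations'])   # -> True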
def group_points_by_symmetry(self, points):
"""
This function classifies the points into groups according to the box symmetry given by spglib.
Args:
points: (np.array/list) nx3 array which contains positions
Returns: list of arrays containing geometrically equivalent positions
It is possible that the original points are not found in the returned list, as positions outside
the box will be wrapped back into the box.
"""
struct_copy = self.copy()
points = np.array(points).reshape(-1, 3)
struct_copy += Atoms(elements=len(points)*['Hs'], positions=points)
struct_copy.center_coordinates_in_unit_cell()
group_IDs = struct_copy.get_symmetry()['equivalent_atoms'][struct_copy.select_index('Hs')]
return [np.round(points[group_IDs==ID], decimals=8) for ID in np.unique(group_IDs)]
def _get_voronoi_vertices(self, minimum_dist=0.1):
"""
This function gives the positions of Voronoi vertices
This function does not work if there are Hs atoms in the box
Args:
minimum_dist: Minimum distance between two Voronoi vertices to be considered as one
Returns: Positions of Voronoi vertices, box
"""
vor = Voronoi(self.repeat(3*[2]).positions) # Voronoi package does not have periodic boundary conditions
b_cell_inv = np.linalg.inv(self.cell)
voro_vert = vor.vertices
for ind, v in enumerate(voro_vert):
pos = np.mean(voro_vert[(np.linalg.norm(voro_vert-v, axis=-1)<minimum_dist)], axis=0) # Find all points which are within minimum_dist
voro_vert[(np.linalg.norm(voro_vert-v, axis=-1)<0.5)] = np.array(3*[-10]) # Mark atoms to be deleted afterwards
voro_vert[ind] = pos
voro_vert = voro_vert[np.min(voro_vert, axis=-1)>-5]
voro_vert = np.dot(b_cell_inv.T, voro_vert.T).T # get scaled positions
voro_vert = voro_vert[(np.min(voro_vert, axis=-1)>0.499) & (np.max(voro_vert, axis=-1)<1.501)]
voro_vert = np.dot(self.cell.T, voro_vert.T).T # get true positions
box_copy = self.copy()
new_atoms = Atoms(cell=self.cell, symbols=['Hs']).repeat([len(voro_vert), 1, 1])
box_copy += new_atoms
pos_total = np.append(self.positions, voro_vert)
pos_total = pos_total.reshape(-1, 3)
box_copy.positions = pos_total
box_copy.center_coordinates_in_unit_cell()
neigh = box_copy.get_neighbors() # delete all atoms which lie within minimum_dist (including periodic boundary conditions)
while len(np.array(neigh.indices).flatten()[np.array(neigh.distances).flatten()<minimum_dist])!=0:
del box_copy[np.array(neigh.indices).flatten()[np.array(neigh.distances).flatten()<minimum_dist][0]]
neigh = box_copy.get_neighbors()
return pos_total, box_copy
def get_equivalent_voronoi_vertices(self, return_box=False, minimum_dist=0.1, symprec=1e-5, angle_tolerance=-1.0):
"""
This function gives the positions of spatially equivalent Voronoi vertices in lists, which
most likely represent interstitial points or vacancies (along with other high symmetry points)
Each list item contains an array of positions which are spatially equivalent.
This function does not work if there are Hs atoms in the box
Args:
return_box: True, if the box containing atoms on the positions of Voronoi vertices
should be returned (which are represented by Hs atoms)
minimum_dist: Minimum distance between two Voronoi vertices to be considered as one
Returns: List of numpy array positions of spatially equivalent Voronoi vertices
"""
_, box_copy = self._get_voronoi_vertices(minimum_dist = minimum_dist)
list_positions = []
sym = box_copy.get_symmetry(symprec=symprec, angle_tolerance=angle_tolerance)
for ind in set(sym['equivalent_atoms'][box_copy.select_index('Hs')]):
list_positions.append(box_copy.positions[sym['equivalent_atoms']==ind])
if return_box:
return list_positions, box_copy
else:
return list_positions
def get_symmetry_dataset(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
lattice = np.array(self.get_cell().T, dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
return spglib.get_symmetry_dataset(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance)
def get_spacegroup(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
lattice = np.array(self.get_cell(), dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
space_group = spglib.get_spacegroup(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance).split()
if len(space_group) == 1:
return {"Number": ast.literal_eval(space_group[0])}
else:
return {"InternationalTableSymbol": space_group[0],
"Number": ast.literal_eval(space_group[1])}
def refine_cell(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
https://atztogo.github.io/spglib/python-spglib.html
"""
lattice = np.array(self.get_cell().T, dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
cell, coords, el = spglib.refine_cell(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance)
return Atoms(symbols=list(self.get_chemical_symbols()),
positions=coords,
cell=cell)
def get_primitive_cell(self, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
symprec:
angle_tolerance:
Returns:
"""
el_dict = {}
for el in set(self.get_chemical_elements()):
el_dict[el.AtomicNumber] = el
lattice = np.array(self.get_cell().T, dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
cell, coords, atomic_numbers = spglib.find_primitive(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance)
# print atomic_numbers, type(atomic_numbers)
el_lst = [el_dict[i_a] for i_a in atomic_numbers]
# convert lattice vectors to standard (experimental feature!) TODO:
red_structure = Atoms(elements=el_lst,
scaled_positions=coords,
cell=cell)
space_group = red_structure.get_spacegroup(symprec)["Number"]
# print "space group: ", space_group
if space_group == 225: # fcc
alat = np.max(cell[0])
amat_fcc = alat * np.array([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
red_structure.cell = amat_fcc
return red_structure
def get_ir_reciprocal_mesh(self, mesh, is_shift=np.zeros(3, dtype='intc'), is_time_reversal=True, symprec=1e-5):
"""
Args:
mesh:
is_shift:
is_time_reversal:
symprec:
Returns:
"""
mapping, mesh_points = spglib.get_ir_reciprocal_mesh(mesh=mesh, cell=self, is_shift=is_shift,
is_time_reversal=is_time_reversal, symprec=symprec)
return mapping, mesh_points
def get_equivalent_atoms(self, eps=1e-5):
"""
Args:
eps:
Returns:
"""
sym = self.get_symmetry()
coords = np.mod(self.get_scaled_positions(wrap=False) + eps, 1) - eps
trans_vec = []
rot_vec = []
id_vec = []
ind_ref = 0 # TODO: extend as loop over all inequivalent atoms
id_mat = np.identity(3, dtype='intc')
ref_id_list = []
for trans, rot in zip(sym["translations"], sym["rotations"]):
if np.linalg.norm(rot - id_mat) < eps: # TODO: remove this limitation
id_list = []
for i_c, coord_new in enumerate(np.mod(coords - trans + eps, 1) - eps):
no_match = True
hash_id = None
for hash_id, c in enumerate(coords):
if np.linalg.norm(coord_new - c) < eps:
id_list.append(hash_id)
no_match = False
break
if hash_id == ind_ref:
# print "ref_id: ", i_c
ref_id_list.append(i_c)
# if len(id_vec)==1:
# print "c: ", i_c, coord_new, c
if no_match:
raise ValueError("No equivalent atom found!")
trans_vec.append(trans)
rot_vec.append(rot)
id_vec.append(id_list)
eq_atoms = [0]
# print "ref_id: ", ref_id_list
return eq_atoms, trans_vec, rot_vec, id_vec, ref_id_list
def get_majority_species(self, return_count=False):
"""
This function returns the majority species and its count in the box
Returns:
dict: A dictionary with the chemical symbol, the count and the chemical index of the majority species
"""
el_dict = self.get_number_species_atoms()
el_num = list(el_dict.values())
el_name = list(el_dict.keys())
        if np.sum(np.array(el_num) == np.max(el_num)) > 1:
            warnings.warn('There is more than one majority species')
symbol_to_index = dict(zip(self.get_chemical_symbols(),
self.get_chemical_indices()))
max_index = np.argmax(el_num)
return {'symbol': el_name[max_index],
'count': int(np.max(el_num)),
'index': symbol_to_index[el_name[max_index]]}
def extend(self, other):
"""
Extend atoms object by appending atoms from *other*. Copied from ase
Args:
other:
Returns:
"""
if isinstance(other, Atom):
other = self.__class__([other])
n1 = len(self)
n2 = len(other)
for name, a1 in self._tag_list.items():
a1 = np.array(a1)
a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
a[:n1] = a1
if name == 'masses':
a2 = other.get_masses()
else:
a2 = other.lists.get(name)
if a2 is not None:
a[n1:] = a2
self._lists[name] = a
for name, a2 in other.lists.items():
if name in self._tag_list.keys():
continue
            a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
            a[n1:] = a2
            if name == 'masses':
                a[:n1] = self.get_masses()[:n1]
            else:
                a[:n1] = 0
            self._lists[name] = a
self._length = n1 + n2
# Take care of the species and index
return self
def append(self, atom):
"""
Append atom to end. Copied from ase
Args:
atom:
Returns:
"""
self.extend(self.__class__([atom]))
def close(self):
# TODO: implement
pass
def get_voronoi_volume(self):
"""
Returns:
"""
warnings.warn("This function doesn't account for periodic boundary conditions. Call "
"`analyse_ovito_voronoi_volume` instead. This is what will now be returned.",
DeprecationWarning)
return self.analyse_ovito_voronoi_volume()
def __add__(self, other):
if isinstance(other, Atoms):
sum_atoms = copy(self)
sum_atoms._tag_list = sum_atoms._tag_list + other._tag_list
sum_atoms.indices = np.append(sum_atoms.indices, other.indices)
sum_atoms.positions = np.append(sum_atoms.positions, other.positions, axis=0)
new_species_lst = copy(sum_atoms.species)
ind_conv = {}
# self_species_lst = [el.Abbreviation for el in self.species]
for ind_old, el in enumerate(other.species):
if el.Abbreviation in sum_atoms._store_elements.keys():
# print ('add:: ', el.Abbreviation, self._store_elements)
ind_new = sum_atoms._species_to_index_dict[sum_atoms._store_elements[el.Abbreviation]]
ind_conv[ind_old] = ind_new
else:
new_species_lst.append(el)
sum_atoms._store_elements[el.Abbreviation] = el
ind_conv[ind_old] = len(new_species_lst) - 1
new_indices = copy(other.indices)
for key, val in ind_conv.items():
new_indices[new_indices == key] = val + 1000
new_indices = np.mod(new_indices, 1000)
sum_atoms.indices[len(self.indices):] = new_indices
sum_atoms.set_species(new_species_lst)
if not len(set(sum_atoms.indices)) == len(sum_atoms.species):
raise ValueError("Adding the atom instances went wrong!")
return sum_atoms
elif isinstance(other, Atom):
other = self.__class__([other])
return self + other
def __copy__(self):
"""
Copies the atoms object
Returns:
atoms_new: A copy of the object
"""
atoms_new = Atoms()
for key, val in self.__dict__.items():
if key not in ['_pse']:
# print ('copy: ', key)
atoms_new.__dict__[key] = copy(val)
return atoms_new
def __delitem__(self, key):
if isinstance(key, (int, np.integer)):
key = [key]
new_length = len(self) - len(key)
key = np.array(key).flatten()
self.positions = np.delete(self.positions, key, axis=0)
self.indices = np.delete(self.indices, key, axis=0)
del self._tag_list[key]
self._tag_list._length = new_length
deleted_species_indices = list()
retain_species_indices = list()
new_indices = self.indices.copy()
for i, el in enumerate(self.species):
if len(self.select_index(el)) == 0:
deleted_species_indices.append(i)
new_indices[new_indices >= i] += -1
else:
retain_species_indices.append(i)
new_species = np.array(self.species).copy()[retain_species_indices]
self.set_species(new_species)
self.indices = new_indices
def __eq__(self, other):
if not (isinstance(other, Atoms)):
raise AssertionError()
conditions = []
for a_1, a_2 in zip(self, other):
conditions.append(a_1 == a_2)
        conditions.append(np.all(self.pbc == other.pbc))
return all(conditions)
def __ne__(self, other):
return not self == other
def __getitem__(self, item):
new_dict = dict()
if isinstance(item, int):
for key, value in self._tag_list.items():
if item < len(value):
if value[item] is not None:
new_dict[key] = value[item]
element = self.species[self.indices[item]]
index = item
position = self.positions[item]
return Atom(element=element, position=position, pse=self._pse, index=index, atoms=self, **new_dict)
new_array = copy(self)
new_array.positions = self.positions[item]
new_indices = self.indices[item].copy()
new_species_indices, new_proper_indices = np.unique(new_indices, return_inverse=True)
new_species = [self.species[ind] for ind in new_species_indices]
new_array.set_species(new_species)
new_array.indices = new_proper_indices
new_array._tag_list = self._tag_list[item]
# new_array._tag_list._length = self._tag_list._length
new_array._tag_list._length = len(new_array)
if isinstance(new_array, Atom):
natoms = len(self)
if item < -natoms or item >= natoms:
raise IndexError('Index out of range.')
new_array.index = item
return new_array
def __getattr__(self, item):
if item in self._tag_list.keys():
return self._tag_list._lists[item]
return object.__getattribute__(self, item)
def __len__(self):
return len(self.indices)
def __repr__(self):
return self.__str__()
def __str__(self):
if len(self) == 0:
return "[]"
out_str = ""
for el, pos in zip(self.get_chemical_symbols(), self.positions):
out_str += el + ": " + str(pos) + "\n"
if len(self.get_tags()) > 0:
tags = self.get_tags()
out_str += "tags: \n" # + ", ".join(tags) + "\n"
for tag in tags:
out_str += " " + str(tag) + ": " + self._tag_list[tag].__str__() + "\n"
if self._cell is not None:
out_str += "pbc: " + str(self.pbc) + "\n"
out_str += "cell: \n"
out_str += str(self.cell) + "\n"
return out_str
def __setitem__(self, key, value):
if isinstance(key, (int, np.integer)):
old_el = self.species[self.indices[key]]
            if isinstance(value, (str, np.str_)):
el = PeriodicTable().element(value)
elif isinstance(value, ChemicalElement):
el = value
else:
raise TypeError('value should either be a string or a ChemicalElement.')
if el != old_el:
new_species = np.array(self.species).copy()
if len(self.select_index(old_el)) == 1:
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
new_species[self.indices[key]] = el
self.set_species(list(new_species))
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
remove_index = self.indices[key]
new_species = list(new_species)
del new_species[remove_index]
self.indices[key] = ind
self.indices[self.indices > remove_index] -= 1
self.set_species(new_species)
else:
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
new_species = list(new_species)
new_species.append(el)
self.set_species(new_species)
self.indices[key] = len(new_species) - 1
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
self.indices[key] = ind
elif isinstance(key, slice) or isinstance(key, (list, tuple, np.ndarray)):
if not isinstance(key, slice):
if hasattr(key, '__len__'):
if len(key) == 0:
return
else:
if key.start is not None:
if key.stop is not None:
key = np.arange(key.start, key.stop, key.step)
else:
if key.start >= 0:
key = np.arange(key.start, len(self), key.step)
else:
key = np.arange(len(self) + key.start, len(self), key.step)
else:
if key.stop is not None:
key = np.arange(0, key.stop, key.step)
else:
key = np.arange(0, len(self), key.step)
            if isinstance(value, (str, np.str_, int, np.integer)):
el = PeriodicTable().element(value)
elif isinstance(value, ChemicalElement):
el = value
else:
raise ValueError("The value assigned should be a string, integer or a ChemicalElement instance")
replace_list = list()
new_species = list(np.array(self.species).copy())
for sp in self.species:
replace_list.append(np.array_equal(np.sort(self.select_index(sp)),
np.sort(np.intersect1d(self.select_index(sp), key))))
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
if not any(replace_list):
new_species.append(el)
self.set_species(new_species)
self.indices[key] = len(new_species) - 1
else:
replace_ind = np.where(replace_list)[0][0]
new_species[replace_ind] = el
if len(np.where(replace_list)[0]) > 1:
                        for ind in sorted(np.where(replace_list)[0][1:], reverse=True):
                            del new_species[ind]
self.set_species(new_species)
self.indices[key] = replace_ind
else:
el_list = np.array([sp.Abbreviation for sp in new_species])
ind = np.argwhere(el_list == el.Abbreviation).flatten()[-1]
if not any(replace_list):
self.set_species(new_species)
self.indices[key] = ind
else:
self.indices[key] = ind
delete_indices = list()
new_indices = self.indices.copy()
for i, rep in enumerate(replace_list):
if i != ind and rep:
delete_indices.append(i)
# del new_species[i]
new_indices[new_indices > i] -= 1
self.indices = new_indices.copy()
new_species = np.array(new_species)[np.setdiff1d(np.arange(len(new_species)),
delete_indices)].tolist()
self.set_species(new_species)
else:
raise NotImplementedError()
__mul__ = repeat
def __imul__(self, vec):
"""
Args:
vec:
Returns:
"""
if isinstance(vec, int):
vec = [vec] * self.dimension
if not (len(vec) == self.dimension):
raise AssertionError()
i_vec = np.array([vec[0], 1, 1])
if self.dimension > 1:
i_vec[1] = vec[1]
if self.dimension > 2:
i_vec[2] = vec[2]
if not self.dimension == 3:
raise NotImplementedError()
mx, my, mz = i_vec
nx_lst, ny_lst, nz_lst = np.arange(mx), np.arange(my), np.arange(mz)
positions = self.get_scaled_positions(wrap=False)
lat = np.array(np.meshgrid(nx_lst, ny_lst, nz_lst)).T.reshape(-1, 3)
lat_new = np.repeat(lat, len(positions), axis=0)
new_positions = np.tile(positions, (len(lat), 1)) + lat_new
self._length = len(new_positions)
self.set_scaled_positions(new_positions/np.array(i_vec))
self.indices = np.tile(self.indices, len(lat))
self._tag_list._length = len(self)
# print ('basis_len: ', len(self.positions), len(new_elements))
# self.cell = (self.cell.T * np.array(vec)).T
self.set_cell((self.cell.T * np.array(vec)).T, scale_atoms=True)
scale = i_vec[0] * i_vec[1] * i_vec[2]
for tag in self._tag_list.keys():
self._tag_list[tag] *= scale
return self # to make it compatible with ASE
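    # Usage sketch: `basis *= 2` (or `basis * (2, 2, 3)` via repeat) tiles the
    # periodic cell along its lattice vectors; positions, indices and per-atom
    # tags are repeated accordingly and the cell vectors are rescaled.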
@staticmethod
def convert_formula(elements):
"""
Args:
elements:
Returns:
"""
el_list = []
num_list = ""
for i, char in enumerate(elements):
is_last = (i == len(elements) - 1)
if len(num_list) > 0:
if (not char.isdigit()) or is_last:
                    el_fac = ast.literal_eval(num_list) * [el_list[-1]]
for el in el_fac[1:]:
el_list.append(el)
num_list = ""
if char.isupper():
el_list.append(char)
elif char.islower():
el_list[-1] += char
elif char.isdigit():
num_list += char
if len(num_list) > 0:
# print "num_list: ", el_list, num_list, el_list[-1], (not char.isdigit()) or is_last
if (not char.isdigit()) or is_last:
el_fac = ast.literal_eval(num_list) * [el_list[-1]]
# print "el_fac: ", el_fac
for el in el_fac[1:]:
el_list.append(el)
num_list = ""
return el_list
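    # Example (with the list product above): a formula string expands into one
    # symbol per atom, digits multiplying the preceding element:
    #   Atoms.convert_formula('Fe2O3') -> ['Fe', 'Fe', 'O', 'O', 'O']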
# ASE compatibility
@staticmethod
def get_calculator():
return None
def get_cell(self, complete=False):
"""Get the three unit cell vectors as a 3x3 ndarray."""
if complete:
return complete_cell(self._cell)
else:
return self._cell.copy()
def get_distance(self, a0, a1, mic=True, vector=False):
"""
Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
vector=True gives the distance vector (from a0 to a1).
Args:
a0: position or atom ID
a1: position or atom ID
mic: minimum image convention (True if periodic boundary conditions should be considered)
            vector: True, if instead of the distance the vector connecting the two positions should be returned
Returns: distance or vectors in length unit
"""
from ase.geometry import find_mic
positions = self.positions
if isinstance(a0, list) or isinstance(a0, np.ndarray):
if not (len(a0) == 3):
raise AssertionError()
a0 = np.array(a0)
else:
a0 = positions[a0]
if isinstance(a1, list) or isinstance(a1, np.ndarray):
if not (len(a1) == 3):
raise AssertionError()
a1 = np.array(a1)
else:
a1 = positions[a1]
distance = np.array([a1 - a0])
if mic:
distance, d_len = find_mic(distance, self.cell, self.pbc)
else:
d_len = np.array([np.sqrt((distance ** 2).sum())])
if vector:
return distance[0]
return d_len[0]
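    # Usage sketch: with periodic boundaries the minimum image convention is
    # applied by default, e.g.
    #   d = structure.get_distance(0, 1)               # scalar separation
    #   v = structure.get_distance(0, 1, vector=True)  # vector from atom 0 to 1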
def get_distances(self, a0=None, a1=None, mic=True, vector=False):
"""
Return distance matrix of every position in p1 with every position in p2
Args:
a0 (numpy.ndarray/list): Nx3 array of positions
a1 (numpy.ndarray/list): Nx3 array of positions
mic (bool): minimum image convention
vector (bool): return vectors instead of distances
Returns:
numpy.ndarray NxN if vector=False and NxNx3 if vector=True
if a1 is not set, it is assumed that distances between all positions in a0 are desired. a1 will be set to a0 in this case.
if both a0 and a1 are not set, the distances between all atoms in the box are returned
Use mic to use the minimum image convention.
Learn more about get_distances from the ase website:
https://wiki.fysik.dtu.dk/ase/ase/geometry.html#ase.geometry.get_distances
"""
if (a0 is not None and len(np.array(a0).shape)!=2) or (a1 is not None and len(np.array(a1).shape)!=2):
raise ValueError('a0 and a1 have to be None or Nx3 array')
if a0 is None and a1 is not None:
a0 = a1
a1 = None
if a0 is None:
a0 = self.positions
if mic:
vec, dist = get_distances(a0, a1, cell=self.cell, pbc=self.pbc)
else:
vec, dist = get_distances(a0, a1)
if vector:
return vec
else:
return dist
def get_distance_matrix(self, mic=True, vector=False):
"""
Return distances between all atoms in a matrix. cf. get_distance
"""
warnings.warn('get_distance_matrix is deprecated. Use get_distances instead', DeprecationWarning)
return self.get_distances(mic=mic, vector=vector)
def get_constraint(self):
if 'selective_dynamics' in self._tag_list._lists.keys():
from ase.constraints import FixAtoms
return FixAtoms(indices=np.array([atom_ind for atom_ind in range(len(self))
if any(self.selective_dynamics[atom_ind])]))
else:
return None
def set_constraint(self, constrain):
if constrain.todict()['name'] != 'FixAtoms':
raise ValueError('Only FixAtoms is supported as ASE compatible constraint.')
if 'selective_dynamics' not in self._tag_list._lists.keys():
self.add_tag(selective_dynamics=None)
for atom_ind in range(len(self)):
if atom_ind in constrain.index:
self.selective_dynamics[atom_ind] = [True, True, True]
else:
self.selective_dynamics[atom_ind] = [False, False, False]
def get_initial_magnetic_moments(self):
"""
Get array of initial magnetic moments.
Returns:
numpy.array()
"""
if 'spin' in self._tag_list._lists.keys():
return np.array(list(self.spin.values()))
else:
spin_lst = [element.tags['spin'] if 'spin' in element.tags.keys() else None
for element in self.get_chemical_elements()]
if any(spin_lst):
if (isinstance(spin_lst, str) or
(isinstance(spin_lst, (list, np.ndarray)) and isinstance(spin_lst[0], str))
) and '[' in list(set(spin_lst))[0]:
return np.array(
[[float(spin_dir) for spin_dir in spin.replace('[', '').replace(']', '').replace(',', '').split()]
if spin else [0.0, 0.0, 0.0] for spin in spin_lst])
elif isinstance(spin_lst, (list, np.ndarray)):
return np.array(spin_lst)
else:
return np.array([float(spin) if spin else 0.0 for spin in spin_lst])
else:
return np.array([None] * len(self))
def set_initial_magnetic_moments(self, magmoms):
"""
Set array of initial magnetic moments.
Args:
magmoms (numpy.array()):
"""
if magmoms is not None:
if len(magmoms) != len(self):
                raise ValueError('magmoms must have the same length as the number of atoms.')
for ind, element in enumerate(self.get_chemical_elements()):
if 'spin' in element.tags.keys():
self[ind] = element.Parent
if 'spin' not in self._tag_list._lists.keys():
self.add_tag(spin=None)
for ind, spin in enumerate(magmoms):
self.spin[ind] = spin
def pop(self, i=-1):
"""
Remove and return atom at index *i* (default last).
Args:
i:
Returns:
"""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def rotate(self, vector, angle=None, center=(0, 0, 0), rotate_cell=False, index_list=None):
"""
Rotate atoms based on a vector and an angle, or two vectors. This function is completely adopted from ASE code
(https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.rotate)
Args:
rotate_cell:
center:
vector (list/numpy.ndarray/string):
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
            angle (float/list) in radians = None:
                Angle that the atoms are rotated around the vector 'v'. If an
                angle is not specified, the length of 'v' is used as the angle
                (default). The angle can also be a vector and then 'v' is
                rotated into 'a'.
center = [0, 0, 0]:
The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
index_list (list/numpy.ndarray):
Indices of atoms to be rotated
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> atoms = Atoms('H', [[-0.1, 1.01, -0.5]], cell=[[1, 0, 0], [0, 1, 0], [0, 0, 4]], pbc=[1, 1, 0])
>>> a = (22./ 7.) / 2. # pi/2
>>> atoms.rotate('z', a)
>>> atoms.rotate((0, 0, 1), a)
>>> atoms.rotate('-z', -a)
>>> atoms.rotate((0, 0, a))
>>> atoms.rotate('x', 'y')
"""
norm = np.linalg.norm
vector = string2vector(vector)
if angle is None:
angle = norm(vector)
if isinstance(angle, (float, int)):
vector /= norm(vector)
c = cos(angle)
s = sin(angle)
else:
v2 = string2vector(angle)
vector /= norm(vector)
v2 /= norm(v2)
c = np.dot(vector, v2)
vector = np.cross(vector, v2)
s = norm(vector)
            # In case *v* and *a* are parallel, np.cross(v, v2) vanishes
            # and can't be used as a rotation axis. However, in this
            # case any rotation axis perpendicular to v2 will do.
eps = 1e-7
if s < eps:
vector = np.cross((0, 0, 1), v2)
if norm(vector) < eps:
vector = np.cross((1, 0, 0), v2)
if not (norm(vector) >= eps):
raise AssertionError()
elif s > 0:
vector /= s
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = np.mean(self.get_positions(), axis=0)
elif center.lower() == 'cou':
center = self.cell.sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
if index_list is not None:
if not (len(index_list) > 0):
raise AssertionError()
rotate_list = np.array(index_list)
else:
rotate_list = np.array(len(self)*[True])
p = self.positions[rotate_list] - center
self.positions[rotate_list] = (c * p -
np.cross(p, s * vector) +
np.outer(np.dot(p, vector), (1.0 - c) * vector) +
center)
if rotate_cell:
rotcell = self.cell
rotcell[:] = (c * rotcell -
np.cross(rotcell, s * vector) +
np.outer(np.dot(rotcell, vector), (1.0 - c) * vector))
self.cell = rotcell
def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
"""Rotate atoms via Euler angles.
        See e.g. http://mathworld.wolfram.com/EulerAngles.html for an explanation.
Parameters:
center :
The point to rotate about. a sequence of length 3 with the
coordinates, or 'COM' to select the center of mass, 'COP' to
select center of positions or 'COU' to select center of cell.
phi :
The 1st rotation angle around the z axis (in radian)
theta :
Rotation around the x axis (in radian)
psi :
2nd rotation around the z axis (in radian)
"""
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.cell.sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
# First move the molecule to the origin In contrast to MATLAB,
# numpy broadcasts the smaller array to the larger row-wise,
# so there is no need to play with the Kronecker product.
if self._is_scaled:
rcoords = self.get_scaled_positions(wrap=False) - center
else:
rcoords = self.positions - center
# First Euler rotation about z in matrix form
d = np.array(((cos(phi), sin(phi), 0.),
(-sin(phi), cos(phi), 0.),
(0., 0., 1.)))
# Second Euler rotation about x:
c = np.array(((1., 0., 0.),
(0., cos(theta), sin(theta)),
(0., -sin(theta), cos(theta))))
# Third Euler rotation, 2nd rotation about z:
b = np.array(((cos(psi), sin(psi), 0.),
(-sin(psi), cos(psi), 0.),
(0., 0., 1.)))
# Total Euler rotation
a = np.dot(b, np.dot(c, d))
# Do the rotation
rcoords = np.dot(a, np.transpose(rcoords))
# Move back to the rotation point
if self._is_scaled:
self.set_scaled_positions(np.transpose(rcoords) + center)
else:
self.positions = np.transpose(rcoords) + center
@property
def scaled_positions(self):
warnings.warn('scaled_positions is deprecated. Use get_scaled_positions instead', DeprecationWarning)
return self.get_scaled_positions(wrap=False)
@scaled_positions.setter
def scaled_positions(self, positions):
warnings.warn('scaled_positions is deprecated. Use set_scaled_positions instead', DeprecationWarning)
self.set_scaled_positions(positions)
def set_scaled_positions(self, scaled):
"""
Set positions relative to unit cell.
Args:
scaled (numpy.ndarray/list): The relative coordinates
"""
if self.cell is None:
raise ValueError('cell has not been set yet')
self.positions = np.einsum('jk,ij->ik', self.cell, scaled)
def set_cell(self, cell, scale_atoms=False):
"""
Set unit cell vectors.
Parameters:
cell: 3x3 matrix or length 3 or 6 vector
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell. Another option is
6 numbers, which describes unit cell with lengths of unit cell
vectors and with angles between them (in degrees), in following
order: [len(a), len(b), len(c), angle(b,c), angle(a,c),
angle(a,b)]. First vector will lie in x-direction, second in
xy-plane, and the third one in z-positive subspace.
scale_atoms: bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> atoms = Atoms('He')
>>> a, b, c = 7, 7.5, 8
>>> atoms.set_cell([a, b, c])
>>> atoms.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> atoms.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
Hexagonal unit cell:
>>> atoms.set_cell([a, a, c, 90, 90, 120])
Rhombohedral unit cell:
>>> alpha = 77
>>> atoms.set_cell([a, a, a, alpha, alpha, alpha])
"""
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape == (6,):
cell = cellpar_to_cell(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence, length 6 '
'sequence or 3x3 matrix!')
if np.linalg.det(cell)<=0:
raise ValueError('Cell must be a full dimensional matrix with '
'right hand orientation')
if scale_atoms:
M = np.linalg.solve(self.get_cell(complete=True),
complete_cell(cell))
self.positions[:] = np.dot(self.positions, M)
self._cell = cell
def translate(self, displacement):
"""
Translate atomic positions.
The displacement argument can be a float, an xyz vector, or an
nx3 array (where n is the number of atoms).
Args:
displacement:
Returns:
"""
self.positions += np.array(displacement)
def wrap(self, center=(0.5, 0.5, 0.5), pbc=None, eps=1e-7):
"""Wrap positions to unit cell.
Parameters:
        center: three floats
            The positions in fractional coordinates that the new positions
            will be nearest possible to.
pbc: one or 3 bool
For each axis in the unit cell decides whether the positions
will be moved along this axis. By default, the boundary
conditions of the Atoms object will be used.
        eps: float
            Small number to prevent slightly negative coordinates from being
            wrapped.
See also the :func:`ase.utils.geometry.wrap_positions` function.
Example:
>>> a = Atoms('H',
... [[-0.1, 1.01, -0.5]],
... cell=[[1, 0, 0], [0, 1, 0], [0, 0, 4]],
... pbc=[1, 1, 0])
>>> a.wrap()
>>> a.positions
array([[ 0.9 , 0.01, -0.5 ]])
"""
from ase.utils.geometry import wrap_positions
if pbc is None:
pbc = self.pbc
self.positions = wrap_positions(self.positions, self.cell,
pbc, center, eps)
def write(self, filename, format=None, **kwargs):
"""
Write atoms object to a file.
see ase.io.write for formats.
kwargs are passed to ase.io.write.
Args:
filename:
format:
**kwargs:
Returns:
"""
from ase.io import write
atoms = self.copy()
atoms.arrays["positions"] = atoms.positions
write(filename, atoms, format, **kwargs)
class _CrystalStructure(Atoms):
"""
only for historical reasons
Args:
element:
BravaisLattice:
BravaisBasis:
LatticeConstants:
Dimension:
relCoords:
PSE:
**kwargs:
"""
def __init__(self,
element="Fe",
bravais_lattice='cubic',
bravais_basis='primitive',
lattice_constants=None, # depending on symmetry length and angles
dimension=3,
rel_coords=True,
pse=None,
**kwargs):
# print "basis0"
# allow also for scalar input for LatticeConstants (for a cubic system)
if lattice_constants is None:
lattice_constants = [1.]
try:
test = lattice_constants[0]
except (TypeError, IndexError):
lattice_constants = [lattice_constants]
self.bravais_lattice = bravais_lattice
self.bravais_basis = bravais_basis
self.lattice_constants = lattice_constants
self.dimension = dimension
self.relCoords = rel_coords
self.element = element
self.__updateCrystal__(pse)
self.crystalParamsDict = {'BravaisLattice': self.bravais_lattice, 'BravaisBasis': self.bravais_basis,
'LatticeConstants': self.lattice_constants}
self.crystal_lattice_dict = {3: {
"cubic": ["fcc", "bcc", "primitive"],
"hexagonal": ["primitive", "hcp"],
"monoclinic": ["primitive", "base-centered"],
"triclinic": ["primitive"],
"orthorombic": ["primitive", "body-centered", "base-centered", "face-centered"],
"tetragonal": ["primitive", "body-centered"],
"rhombohedral": ["primitive"]}, 2: {
"oblique": ["primitive"],
"rectangular": ["primitive", "centered"],
"hexagonal": ["primitive"],
"square": ["primitive"]}, 1: {"line": ["primitive"]}}
# init structure for lattice parameters alat, blat, clat, alpha, beta, gamma
self.crystalLatticeParams = {3: {"cubic": [1.],
"hexagonal": [1., 2.],
"monoclinic": [1., 1., 1., 90.],
"triclinic": [1., 2., 3., 90., 90., 90.],
"orthorombic": [1., 1., 1.],
"tetragonal": [1., 2.],
"rhombohedral": [1., 90., 90., 90.]}, 2: {"oblique": [1., 1., 90.],
"rectangular": [1., 1.],
"hexagonal": [1.],
"square": [1.]}, 1: {"line": [1.]}}
# print "basis"
super(_CrystalStructure, self).__init__(elements=self.ElementList,
scaled_positions=self.coordinates,
cell=self.amat, # tag = "Crystal",
pbc=[True, True, True][0:self.dimension])
# ## private member functions
def __updateCrystal__(self, pse=None):
"""
Args:
pse:
Returns:
"""
self.__updateAmat__()
self.__updateCoordinates__()
self.__updateElementList__(pse)
def __updateAmat__(self): # TODO: avoid multi-call of this function
"""
Returns:
"""
# print "lat constants (__updateAmat__):", self.LatticeConstants
a_lat = self.lattice_constants[0]
if self.dimension == 3:
alpha = None
beta = None
gamma = None
b_lat, c_lat = None, None
if self.bravais_lattice == 'cubic':
b_lat = c_lat = a_lat
alpha = beta = gamma = 90 / 180. * np.pi # 90 degrees
elif self.bravais_lattice == 'tetragonal':
b_lat = a_lat
c_lat = self.lattice_constants[1]
alpha = beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == 'triclinic':
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = self.lattice_constants[3] / 180. * np.pi
beta = self.lattice_constants[4] / 180. * np.pi
gamma = self.lattice_constants[5] / 180. * np.pi
elif self.bravais_lattice == 'hexagonal':
b_lat = a_lat
c_lat = self.lattice_constants[1]
alpha = 60. / 180. * np.pi # 60 degrees
beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == 'orthorombic':
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = beta = gamma = 0.5 * np.pi # 90 degrees
elif self.bravais_lattice == 'rhombohedral':
b_lat = a_lat
c_lat = a_lat
alpha = self.lattice_constants[1] / 180. * np.pi
beta = self.lattice_constants[2] / 180. * np.pi
gamma = self.lattice_constants[3] / 180. * np.pi
elif self.bravais_lattice == 'monoclinic':
b_lat = self.lattice_constants[1]
c_lat = self.lattice_constants[2]
alpha = 0.5 * np.pi
beta = self.lattice_constants[3] / 180. * np.pi
gamma = 0.5 * np.pi
b1 = np.cos(alpha)
b2 = np.sin(alpha)
c1 = np.cos(beta)
c2 = (np.cos(gamma) - np.cos(beta) * np.cos(alpha)) / np.sin(alpha)
self.amat = np.array([[a_lat, 0., 0.],
[b_lat * b1, b_lat * b2, 0.],
[c_lat * c1, c_lat * c2, c_lat * np.sqrt(1 - c2 * c2 - c1 * c1)]])
elif self.dimension == 2: # TODO not finished yet
self.amat = a_lat * np.array([[1., 0.], [0., 1.]])
if self.bravais_lattice == 'rectangular':
b_lat = self.lattice_constants[1]
self.amat = np.array([[a_lat, 0.], [0., b_lat]])
elif self.dimension == 1:
self.amat = a_lat * np.array([[1.]])
else:
raise ValueError("Bravais lattice not defined!")
def __updateElementList__(self, pse=None):
"""
Args:
pse:
Returns:
"""
self.ElementList = len(self.coordinates) * [self.element]
def __updateCoordinates__(self):
"""
Returns:
"""
# if relative coordinates
basis = None
if self.dimension == 3:
if self.bravais_basis == "fcc" or self.bravais_basis == "face-centered":
basis = np.array([[0., 0., 0.], [0.5, 0.5, 0.], [0.5, 0., 0.5], [0., 0.5, 0.5]])
elif self.bravais_basis == "body-centered" or self.bravais_basis == "bcc":
basis = np.array([[0., 0., 0.], [0.5, 0.5, 0.5]])
elif self.bravais_basis == "base-centered":
basis = np.array([[0., 0., 0.], [0.5, 0.5, 0.]])
elif self.bravais_basis == "hcp":
# basis = r([[0.0,-1./np.sqrt(3.),np.sqrt(8./3.)]])
# a = self.LatticeConstants[0]
# c = self.LatticeConstants[1]
basis = np.array([[0., 0., 0.], [1. / 3., 1. / 3., 1. / 2.]])
# basis = np.dot(basis,np.linalg.inv(self.amat))
elif self.bravais_basis == "primitive":
basis = np.array([[0., 0., 0.]])
            else:
                raise ValueError("Unknown bravais_basis for 3d: " + str(self.bravais_basis))
elif self.dimension == 2:
if self.bravais_basis == "primitive":
basis = np.array([[0., 0.]])
elif self.bravais_basis == "centered":
basis = np.array([[0., 0.], [0.5, 0.5]])
            else:
                raise ValueError("Unknown bravais_basis for 2d: " + str(self.bravais_basis))
elif self.dimension == 1:
if self.bravais_basis == "primitive":
basis = np.array([[0.]])
            else:
                raise ValueError("Unknown bravais_basis for 1d: " + str(self.bravais_basis))
self.coordinates = basis
# ########################### get commmands ########################
def get_lattice_types(self):
"""
Returns:
"""
        return sorted(self.crystal_lattice_dict[self.dimension].keys())
def get_dimension_of_lattice_parameters(self):
"""
Returns:
"""
# print "getDimensionOfLatticeParameters"
counter = 0
for k in self.get_needed_lattice_parameters():
if k:
counter += 1
return counter
def get_needed_lattice_parameters(self):
"""
Returns:
"""
# print "call: getNeededLatticeParams"
needed_params = [True, False, False, False, False, False]
if self.dimension == 3:
if self.bravais_lattice == 'cubic':
needed_params = [True, False, False, False, False,
False] # stands for alat, blat, clat, alpha, beta, gamma
elif self.bravais_lattice == 'triclinic':
needed_params = [True, True, True, True, True, True]
elif self.bravais_lattice == 'monoclinic':
needed_params = [True, True, True, True, False, False]
elif self.bravais_lattice == 'orthorombic':
needed_params = [True, True, True, False, False, False]
elif self.bravais_lattice == 'tetragonal':
needed_params = [True, False, True, False, False, False]
elif self.bravais_lattice == 'rhombohedral':
needed_params = [True, False, False, True, True, True]
elif self.bravais_lattice == 'hexagonal':
needed_params = [True, False, True, False, False, False]
elif self.dimension == 2:
if self.bravais_lattice == 'oblique':
needed_params = [True, True, False, True, False, False]
elif self.bravais_lattice == 'rectangular':
needed_params = [True, True, False, False, False, False]
elif self.bravais_lattice == 'hexagonal':
needed_params = [True, False, False, False, False, False]
elif self.bravais_lattice == 'square':
needed_params = [True, False, False, False, False, False]
else: # TODO: need to be improved
needed_params = [True, False, False, False, False, False]
elif self.dimension == 1:
if self.bravais_lattice == 'line':
needed_params = [True, False, False, False, False, False]
else: # TODO: improval needed
needed_params = [True, False, False, False, False, False]
else:
raise ValueError("inconsistency in lattice structures")
return needed_params
def get_basis_types(self):
"""
Returns:
"""
self.crystal_lattice_dict[self.dimension].get(self.bravais_lattice).sort()
return self.crystal_lattice_dict[self.dimension].get(self.bravais_lattice)
def get_initial_lattice_constants(self):
"""
Returns:
"""
        lattice_constants = self.crystalLatticeParams[self.dimension].get(self.bravais_lattice)
        lattice_constants.sort()
        return lattice_constants
# def getDimension(self):
# return self.dimension
# def getCoordinates(self):
# return self.coordinates
# def getCell(self):
# return self.amat
def get_atom_structure(self, rel=True):
"""
Args:
rel:
Returns:
"""
# print self.relCoords, self.amat
return Atoms(
elementList=self.ElementList,
coordinates=self.coordinates,
amat=self.amat,
tag="Crystal",
rel=rel, # self.relCoords, #rel, # true or false # coordinates are given in relative lattice units
pbc=[True, True, True][0:self.dimension],
Crystal=self.crystalParamsDict
)
# #################### set commands #########################
def set_lattice_constants(self, lattice_constants=None):
"""
Args:
lattice_constants:
Returns:
"""
if lattice_constants is None:
lattice_constants = [1.]
for k in lattice_constants:
if k <= 0:
raise ValueError("negative lattice parameter(s)")
self.lattice_constants = lattice_constants
self.__updateCrystal__()
def set_element(self, element="Fe"):
"""
Args:
element:
Returns:
"""
self.element = element
self.__updateCrystal__()
def set_dimension(self, dim=3):
"""
Args:
dim:
Returns:
"""
self.dimension = dim
length = self.get_dimension_of_lattice_parameters()
if dim == 3: # # initial 3d structure
self.lattice_constants = length * [1.]
self.bravais_lattice = "cubic"
self.bravais_basis = "primitive"
elif dim == 2: # # initial 2d structure
self.lattice_constants = length * [1.]
self.bravais_lattice = "square"
self.bravais_basis = "primitive"
elif dim == 1: # # initial 1d structure
self.lattice_constants = length * [1.]
self.bravais_lattice = "line"
self.bravais_basis = "primitive"
self.__updateCrystal__()
def set_lattice_type(self, name_lattice='cubic'):
"""
Args:
name_lattice:
Returns:
"""
# catch input error
# print "lattice type =", name_lattice
if name_lattice not in self.get_lattice_types():
raise ValueError("is not item of ")
else:
self.bravais_lattice = name_lattice
self.set_lattice_constants(self.get_dimension_of_lattice_parameters() * [1.])
self.set_basis_type(
name_basis=self.crystal_lattice_dict[self.dimension].get(name_lattice)[0]) # initial basis type
self.__updateCrystal__()
def set_basis_type(self, name_basis='primitive'):
"""
Args:
name_basis:
Returns:
"""
if name_basis not in self.get_basis_types():
raise ValueError("is not item of")
else:
self.bravais_basis = name_basis
self.__updateCrystal__()
def atoms(self):
"""
Returns:
"""
return Atoms(elements=self.ElementList,
scaled_positions=self.coordinates,
cell=self.amat,
pbc=[True, True, True][0:self.dimension])
class Neighbors:
"""
Class for storage of the neighbor information for a given atom based on the KDtree algorithm
"""
def __init__(self):
self._distances = None
self._vecs = None
self._indices = None
self._shells = None
@property
def distances(self):
return self._distances
@distances.setter
def distances(self, new_distances):
if isinstance(new_distances, list) or isinstance(new_distances, np.ndarray):
self._distances = np.array(new_distances)
else:
raise TypeError('Only lists and np.arrays are supported.')
@property
def vecs(self):
return self._vecs
@vecs.setter
def vecs(self, new_vecs):
if isinstance(new_vecs, list) or isinstance(new_vecs, np.ndarray):
self._vecs = np.array(new_vecs)
else:
raise TypeError('Only lists and np.arrays are supported.')
@property
def indices(self):
return self._indices
@indices.setter
def indices(self, new_indices):
if isinstance(new_indices, list) or isinstance(new_indices, np.ndarray):
self._indices = np.array(new_indices)
else:
raise TypeError('Only lists and np.arrays are supported.')
@property
def shells(self):
return self._shells
@shells.setter
def shells(self, new_shells):
        if isinstance(new_shells, list) or isinstance(new_shells, np.ndarray):
self._shells = np.array(new_shells)
else:
raise TypeError('Only lists and np.arrays are supported.')
class CrystalStructure(object):
def __new__(cls, *args, **kwargs):
basis = _CrystalStructure(*args, **kwargs).atoms()
return basis
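# Usage sketch (a minimal example using this module's names): build a 4.05 A
# fcc aluminium basis as an Atoms object:
#   fcc_al = CrystalStructure(element="Al", bravais_lattice="cubic",
#                             bravais_basis="fcc", lattice_constants=[4.05])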
def ase_to_pyiron(ase_obj):
"""
Args:
ase_obj:
Returns:
"""
    try:
        import ase
    except ImportError:
        raise ImportError('ASE package not installed')
element_list = ase_obj.get_chemical_symbols()
cell = ase_obj.cell
positions = ase_obj.get_positions()
pbc = ase_obj.get_pbc()
spins = ase_obj.get_initial_magnetic_moments()
if all(spins == np.array(None)) or sum(np.abs(spins)) == 0.0:
pyiron_atoms = Atoms(elements=element_list, positions=positions, pbc=pbc, cell=cell)
else:
if any(spins == np.array(None)):
            spins[spins == np.array(None)] = 0.0
        pyiron_atoms = Atoms(elements=element_list, positions=positions, pbc=pbc, cell=cell, magmoms=spins)
    return pyiron_atoms
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import dshape, isnumeric, Record, Option, DataShape, maxtype
from datashape import coretypes as ct
from toolz import concat, unique, memoize, identity
from .utils import ngjit
# Dynd Missing Type Flags
_dynd_missing_types = {np.dtype('i2'): np.iinfo('i2').min,
np.dtype('i4'): np.iinfo('i4').min,
np.dtype('i8'): np.iinfo('i8').min,
np.dtype('f4'): np.nan,
np.dtype('f8'): np.nan}
def make_is_missing(m):
return ngjit(lambda x: x == m)
# Lookup from dtype to function that checks if value is missing
_dynd_is_missing = {}
for dt, m in _dynd_missing_types.items():
_dynd_is_missing[dt] = np.isnan if m is np.nan else make_is_missing(m)
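# Minimal sketch of the sentinel convention defined above: integer aggregates
# use their dtype's minimum as the "missing" flag, floats use NaN.
#   flag = _dynd_missing_types[np.dtype('i4')]   # == np.iinfo('i4').min
#   _dynd_is_missing[np.dtype('i4')](flag)       # -> True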
def numpy_dtype(x):
if hasattr(x, 'ty'):
return numpy_dtype(x.ty)
return x.to_numpy_dtype()
def optionify(d):
if isinstance(d, DataShape):
return DataShape(*(optionify(i) for i in d.parameters))
return d if isinstance(d, Option) else Option(d)
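# e.g. optionify(ct.int32) -> Option(int32); DataShape instances are rebuilt
# recursively so that every parameter (and hence the measure) becomes nullable.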
class Aggregation(object):
def __hash__(self):
return hash((type(self), self.inputs))
def __eq__(self, other):
return type(self) is type(other) and self.inputs == other.inputs
class Reduction(Aggregation):
def __init__(self, column):
self.column = column
def validate(self, in_dshape):
if not isnumeric(in_dshape.measure[self.column]):
raise ValueError("input must be numeric")
def out_dshape(self, in_dshape):
if hasattr(self, '_dshape'):
return self._dshape
return dshape(optionify(in_dshape.measure[self.column]))
@property
def inputs(self):
return (self.column,)
@property
def _bases(self):
return (self,)
@property
def _temps(self):
return ()
@memoize
def _build_create(self, dshape):
dtype = numpy_dtype(dshape.measure)
value = _dynd_missing_types[dtype]
return lambda shape: np.full(shape, value, dtype=dtype)
class count(Reduction):
_dshape = dshape(Option(ct.int32))
def validate(self, in_dshape):
pass
@memoize
def _build_create(self, dshape):
dtype = numpy_dtype(dshape.measure)
return lambda shape: np.zeros(shape, dtype=dtype)
def _build_append(self, dshape):
return append_count
def _build_combine(self, dshape):
return combine_count
def _build_finalize(self, dshape):
return identity
class sum(Reduction):
def out_dshape(self, input_dshape):
return dshape(optionify(maxtype(input_dshape.measure[self.column])))
def _build_append(self, dshape):
return build_append_sum(dshape)
def _build_combine(self, dshape):
return combine_sum
def _build_finalize(self, dshape):
return identity
class min(Reduction):
@memoize
def _build_create(self, dshape):
dtype = numpy_dtype(dshape.measure)
if np.issubdtype(dtype, np.floating):
value = np.inf
else:
value = np.iinfo(dtype).max
return lambda shape: np.full(shape, value, dtype=dtype)
def _build_append(self, dshape):
return append_min
def _build_combine(self, dshape):
return combine_min
def _build_finalize(self, dshape):
return build_finalize_min(dshape.measure[self.column])
class max(Reduction):
@memoize
def _build_create(self, dshape):
dtype = numpy_dtype(dshape.measure)
if np.issubdtype(dtype, np.floating):
value = -np.inf
else:
value = np.iinfo(dtype).min
return lambda shape: np.full(shape, value, dtype=dtype)
def _build_append(self, dshape):
return append_max
def _build_combine(self, dshape):
return combine_max
def _build_finalize(self, dshape):
return build_finalize_max(dshape.measure[self.column])
class m2(Reduction):
"""Second moment"""
_dshape = dshape(ct.float64)
@property
def _temps(self):
return (sum(self.column), count(self.column))
def _build_append(self, dshape):
return append_m2
def _build_combine(self, dshape):
return combine_m2
def _build_finalize(self, dshape):
return identity
class mean(Reduction):
_dshape = dshape(Option(ct.float64))
@property
def _bases(self):
return (sum(self.column), count(self.column))
def _build_finalize(self, dshape):
return finalize_mean
class var(Reduction):
_dshape = dshape(Option(ct.float64))
@property
def _bases(self):
return (sum(self.column), count(self.column), m2(self.column))
def _build_finalize(self, dshape):
return finalize_var
class std(Reduction):
_dshape = dshape(Option(ct.float64))
@property
def _bases(self):
return (sum(self.column), count(self.column), m2(self.column))
def _build_finalize(self, dshape):
return finalize_std
class Summary(Aggregation):
def __init__(self, **kwargs):
ks, vs = zip(*sorted(kwargs.items()))
self.keys = ks
self.values = [Summary(**v) if isinstance(v, dict) else v for v in vs]
def __hash__(self):
return hash((type(self), tuple(self.keys), tuple(self.values)))
def validate(self, input_dshape):
for v in self.values:
v.validate(input_dshape)
def out_dshape(self, in_dshape):
return dshape(Record([(k, v.out_dshape(in_dshape)) for (k, v)
in zip(self.keys, self.values)]))
@property
def inputs(self):
return tuple(unique(concat(v.inputs for v in self.values)))
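# Usage sketch (the column name 'value' is illustrative): several reductions
# can be combined into one record-valued aggregation per pixel, e.g.
#   agg = Summary(n=count('value'), lo=min('value'), hi=max('value'))
#   agg.out_dshape(dshape('{value: float64}'))  # record of ?int32/?float64 fields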
# ============ Appenders ============
@ngjit
def append_count(x, y, agg, field):
agg[y, x] += 1
@ngjit
def append_max(x, y, agg, field):
if agg[y, x] < field:
agg[y, x] = field
@ngjit
def append_min(x, y, agg, field):
if agg[y, x] > field:
agg[y, x] = field
@ngjit
def append_m2(x, y, m2, field, sum, count):
# sum & count are the results of sum[y, x], count[y, x] before being
# updated by field
if count == 0:
m2[y, x] = 0
else:
u1 = np.float64(sum) / count
u = np.float64(sum + field) / (count + 1)
m2[y, x] += (field - u1) * (field - u)
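# append_m2 is a streaming (Welford-style) update of the sum of squared
# deviations: with the old mean u1 = sum/count and the updated mean
# u = (sum + field)/(count + 1), the second moment grows by (field-u1)*(field-u).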
@memoize
def build_append_sum(dshape):
# sum needs specialization for each missing flag
dtype = numpy_dtype(dshape)
is_missing = _dynd_is_missing[dtype]
@ngjit
def append_sum(x, y, agg, field):
if is_missing(agg[y, x]):
agg[y, x] = field
else:
agg[y, x] += field
return append_sum
# ============ Combiners ============
def combine_count(aggs):
return aggs.sum(axis=0)
def combine_sum(aggs):
missing_val = _dynd_missing_types[aggs.dtype]
is_missing = _dynd_is_missing[aggs.dtype]
missing_vals = is_missing(aggs)
all_empty = np.bitwise_and.reduce(missing_vals, axis=0)
set_to_zero = missing_vals & ~all_empty
out = np.where(set_to_zero, 0, aggs).sum(axis=0)
if missing_val is not np.nan:
out[all_empty] = missing_val
return out
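# combine_sum zeroes per-partition missing flags before summing, then restores
# the flag only where every partition was empty; e.g. for int32 partitions
# [MISSING, 3] and [5, MISSING] the combined result is [5, 3].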
def combine_min(aggs):
return np.nanmin(aggs, axis=0)
def combine_max(aggs):
return np.nanmax(aggs, axis=0)
def combine_m2(Ms, sums, ns):
    sums = sums.astype('f8')  # promote to float64 before computing means
mu = np.nansum(sums, axis=0) / ns.sum(axis=0)
return np.nansum(Ms + ns*(sums/ns - mu)**2, axis=0)
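# combine_m2 merges per-partition second moments with the standard parallel
# formula: M2 = sum_i M2_i + sum_i n_i * (mean_i - mean)^2, where mean is the
# grand mean mu computed above.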
# ============ Finalizers ============
@memoize
def build_finalize_min(dshape):
dtype = numpy_dtype(dshape.measure)
missing = _dynd_missing_types[dtype]
if np.issubdtype(dtype, np.floating):
return lambda x: np.where(np.isposinf(x), missing, x)
else:
value = np.iinfo(dtype).max
        return lambda x: np.where(x == value, missing, x)
import numpy as np
from scipy.special import gamma, gammainc
def boys_function(m, T):
    if np.ndim(T) > 0 and not isinstance(T, np.ndarray):
        T = np.array(T)
    if np.ndim(m) > 0 and not isinstance(m, np.ndarray):
        m = np.array(m)
mp = m + (1 / 2)
    # Limit for T -> 0: F_m(0) = 1 / (2m + 1); clamp tiny T to avoid 0/0 below
    threshold = 1e-13
    if np.ndim(T) > 0:
        if np.any(np.abs(T) < threshold):
            T = np.where(np.abs(T) < threshold, threshold, T)
    elif np.abs(T) < threshold:
        T = threshold
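    # Closed form via the lower incomplete gamma function (an assumption
    # reconstructing the truncated remainder of this routine):
    #   F_m(T) = gamma_lower(m + 1/2, T) / (2 * T**(m + 1/2)),
    # where scipy's gammainc is regularized, hence the gamma(mp) prefactor.
    return gamma(mp) * gammainc(mp, T) / (2 * T ** mp)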
"""Test a trained classification model."""
import argparse
import numpy as np
import os
import sys
import torch
from pycls.core.config import assert_cfg
# from pycls.core.config import cfg
from pycls.utils.meters import TestMeter
import pycls.datasets.loader as imagenet_loader
import pycls.core.model_builder as model_builder
import pycls.datasets.loader as loader
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
from al_utils.data import Data as custom_Data
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(description="Test a trained classification model")
parser.add_argument("--cfg", dest="cfg_file", help="Config file", type=str)
parser.add_argument(
"opts",
help="See pycls/core/config.py for all options",
default=None,
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--model_path_file",
type=str,
default="",
help="Path of file containing model paths",
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def log_model_info(model):
"""Logs model info"""
# print('Model:\n{}'.format(model))
print("Params: {:,}".format(mu.params_count(model)))
print("Flops: {:,}".format(mu.flops_count(model)))
@torch.no_grad()
def test_epoch(test_loader, model, test_meter, cur_epoch, cfg):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.0
totalSamples = 0.0
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce(cfg, [top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
# Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs
misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
totalSamples += inputs.size(0) * cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(top1_err, inputs.size(0) * cfg.NUM_GPUS)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
return misclassifications / totalSamples
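# Note on the accumulation above: mu.topk_errors returns batch-averaged errors
# and du.scaled_all_reduce rescales by 1/NUM_GPUS, so multiplying top1_err by
# inputs.size(0) * cfg.NUM_GPUS recovers the absolute misclassification count.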
def test_model(test_acc, cfg):
"""Evaluates the model."""
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model(
cfg, active_sampling=cfg.ACTIVE_LEARNING.ACTIVATE, isDistributed=True
)
log_model_info(model)
# Load model weights
cu.load_checkpoint(cfg, cfg.TEST.WEIGHTS, model)
print("Loaded model weights from: {}".format(cfg.TEST.WEIGHTS))
# Create data loaders
# test_loader = loader.construct_test_loader()
if cfg.TRAIN.DATASET == "IMAGENET":
test_loader = imagenet_loader.construct_test_loader(cfg)
else:
dataObj = custom_Data(dataset=cfg.TRAIN.DATASET)
# print("=========== Loading testDataset ============")
was_eval = dataObj.eval_mode
dataObj.eval_mode = True
testDataset, _ = dataObj.getDataset(
save_dir=cfg.TEST_DIR, isTrain=False, isDownload=True
)
dataObj.eval_mode = was_eval
test_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=None,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=testDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
# Create meters
test_meter = TestMeter(len(test_loader), cfg)
# Evaluate the model
test_err = test_epoch(test_loader, model, test_meter, 0, cfg)
print("Test Accuracy: {:.3f}".format(100.0 - test_err))
if cfg.NUM_GPUS > 1:
test_acc.value = 100.0 - test_err
else:
return 100.0 - test_err
def test_single_proc_test(test_acc, cfg):
"""Performs single process evaluation."""
# Setup logging
lu.setup_logging(cfg)
# Show the config
# print('Config:\n{}'.format(cfg))
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Evaluate the model
if cfg.NUM_GPUS > 1:
test_model(test_acc, cfg)
else:
return test_model(test_acc, cfg)
def test_main(args, avail_nGPUS=4):
from pycls.core.config import cfg
test_acc = 0.0
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
# cfg.PORT = 10095
assert_cfg()
# avail_nGPUS = torch.cuda.device_count()
if cfg.NUM_GPUS > avail_nGPUS:
print(
"Available GPUS at test machine: ",
avail_nGPUS,
" but requested config has GPUS: ",
cfg.NUM_GPUS,
)
print(f"Running on {avail_nGPUS} instead of {cfg.NUM_GPUS}")
cfg.NUM_GPUS = avail_nGPUS
cfg.freeze()
dataset = cfg.TEST.DATASET
data_split = cfg.ACTIVE_LEARNING.DATA_SPLIT
seed_id = cfg.RNG_SEED
sampling_fn = cfg.ACTIVE_LEARNING.SAMPLING_FN
print("======================================")
print("~~~~~~ CFG.NUM_GPUS: ", cfg.NUM_GPUS)
print("======================================")
# Perform evaluation
if cfg.NUM_GPUS > 1:
test_acc = mpu.multi_proc_run_test(
num_proc=cfg.NUM_GPUS, fun=test_single_proc_test, fun_args=(cfg,)
)
else:
temp_acc = 0.0
test_acc = test_single_proc_test(temp_acc, cfg)
# Save test accuracy
test_model_path = cfg.TEST.WEIGHTS
test_model_name = np.array([test_model_path.split("/")[-1]])
file_name = "test_acc_"
file_save_path = cfg.OUT_DIR
if cfg.TRAIN.TRANSFER_EXP:
file_save_path = os.path.abspath(os.path.join(file_save_path, os.pardir))
# file_save_path= os.path.join(file_save_path,os.path.join("transfer_experiment",cfg.MODEL.TRANSFER_MODEL_TYPE+"_depth_"+str(cfg.MODEL.TRANSFER_MODEL_DEPTH)))#+"/"
file_save_path = os.path.join(file_save_path, file_name)
test_accuracy = np.array([test_acc], dtype="float")
    temp_data = np.column_stack((test_model_path, test_accuracy))
import numpy.testing as npt
import pytest
from scipy.special import logsumexp
import numpy as np
from ..lemm import LEMM_Parameters, GLEMM_Parameters, GLEMM_Parameters_Untied
class Test_Parameters:
def test_lemm_parameters(self):
M = 10
m = 4
n = 2
logp = np.random.standard_normal((M,))
logp -= logsumexp(logp, keepdims=True)
V = np.random.randn(m, n)
TH = LEMM_Parameters(V, M, logp)
assert TH.M == M
assert TH.m == m
assert TH.n == n
assert not TH.gaussian
TH2 = LEMM_Parameters(V, M, None)
assert TH2.logp.shape == (M,)
npt.assert_allclose(np.sum(np.exp(TH2.logp)), 1.0)
with pytest.raises(ValueError):
LEMM_Parameters(V, M-1, logp)
def test_glemm_parameters(self):
M = 10
m = 4
n = 2
V = np.random.randn(m, n)
covars = [
('spherical', 1.0),
('diagonal', np.ones(n)),
('full', np.eye(n)),
]
for cv_type, covar in covars:
GLEMM_Parameters(V, M, None, cv_type, covar)
        X = np.random.randn(20, n)
from __future__ import absolute_import
from __future__ import print_function
from data_utils import load_dialog_task, vectorize_data, load_candidates, vectorize_candidates, vectorize_candidates_sparse, tokenize, r_load_dialog_task
from sklearn import metrics
import sklearn
from memn2n import MemN2NDialog
from itertools import chain
from six.moves import range, reduce
import sys
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
import numpy as np
import os
import pickle
import random
import time
import copy
tf.flags.DEFINE_float("learning_rate", 0.001, "Learning rate for Adam Optimizer.")
tf.flags.DEFINE_float("aux_learning_rate", 0.001, "Learning rate for aux Optimizer that updates anet using aux loss.")
tf.flags.DEFINE_float("outer_learning_rate", 0.001, "Learning rate for qnet Optimizer that updates qnet")
tf.flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam, rms Optimizer.")
tf.flags.DEFINE_float("max_grad_norm", 0.5, "Clip gradients to this norm.")
tf.flags.DEFINE_integer("evaluation_interval", 10, "Evaluate and print results every x epochs")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("hops", 3, "Number of hops in the Memory Network.")
tf.flags.DEFINE_integer("epochs", 200, "Number of epochs to train for.")
tf.flags.DEFINE_integer("embedding_size", 20, "Embedding size for embedding matrices.")
tf.flags.DEFINE_integer("memory_size", 250, "Maximum size of memory.")
tf.flags.DEFINE_integer("task_id", 5, "task id, 1 <= id <= 5")
tf.flags.DEFINE_integer("r_task_id", 5, "task id of the related task, 1 <= id <= 5")
tf.flags.DEFINE_integer("random_state", None, "Random state.")
tf.flags.DEFINE_string("data_dir", "../data/personalized-dialog-dataset/full", "Directory containing bAbI tasks")
tf.flags.DEFINE_string("test_data_dir", "../data/personalized-dialog-dataset/full", "Directory testing tasks")
tf.flags.DEFINE_string("r_data_dir", "../data/dialog-bAbI-tasks", "Directory containing original bAbI tasks")
tf.flags.DEFINE_string("model_dir", "gen/", "Directory containing memn2n model checkpoints")
tf.flags.DEFINE_string("restore_model_dir", "gen/", "Directory containing model for restore for training")
tf.flags.DEFINE_boolean('restore', False, 'if True,restore for training')
tf.flags.DEFINE_string("aux_opt", "adam", "optimizer for updating anet using aux loss")
tf.flags.DEFINE_string("aux_nonlin", None, "nonlinearity at the end of aux prediction/target, arctan")
tf.flags.DEFINE_boolean('has_qnet', True, 'if True, add question network')
tf.flags.DEFINE_boolean('train', True, 'if True, begin to train')
tf.flags.DEFINE_boolean('sep_test', False, 'if True, load test data from a test data dir')
tf.flags.DEFINE_boolean('OOV', False, 'if True, use OOV test set')
tf.flags.DEFINE_boolean('save_vocab', False, 'if True, saves vocabulary')
tf.flags.DEFINE_boolean('load_vocab', False, 'if True, loads vocabulary instead of building it')
tf.flags.DEFINE_boolean('alternate', False, 'if True, alternate training between primary and related every epoch, else do it every batch')
tf.flags.DEFINE_boolean('only_aux', False, 'if True, train anet using only aux, update qnet using full primary task data')
tf.flags.DEFINE_boolean('only_primary', False, 'if True, train anet using only primary')
tf.flags.DEFINE_boolean('m_series', False, 'if True, m_series is set')
tf.flags.DEFINE_boolean('only_related', False, 'if True, train qnet using only related tasks')
tf.flags.DEFINE_boolean('copy_qnet2anet', False, 'if True copy qnet to anet before starting training')
tf.flags.DEFINE_boolean('transform_qnet', False, 'if True train qnet_aux with primary data to match anet u_k')
tf.flags.DEFINE_boolean('transform_anet', False, 'if True train anet(u_k) with related data to match qnet_aux')
tf.flags.DEFINE_boolean('primary_and_related', False, 'if True train anet(u_k) with related data and primary data')
tf.flags.DEFINE_boolean('gated_qnet', False, 'gated qnet')
tf.flags.DEFINE_float("outer_r_weight", 0, "Weight of the related task loss in the outer loop")
tf.flags.DEFINE_integer("qnet_hops", 3, "Number of hops in the qnet Memory Network.")
tf.flags.DEFINE_boolean('copy_qnet2gqnet', False, 'if True copy qnet to gated qnet before starting training')
tf.flags.DEFINE_boolean('separate_eval', False, 'if True split eval data from primary')
tf.flags.DEFINE_boolean('r1', False, 'if True second related task')
tf.flags.DEFINE_string("r1_data_dir", "../data/personalized-dialog-dataset/small-r1-10", "Directory containing r1 related tasks")
tf.flags.DEFINE_string("gate_nonlin", None, "nonlinearity at the end gated qnet")
tf.flags.DEFINE_boolean('only_gated_qnet', False, 'if True update only gated qnet')
tf.flags.DEFINE_boolean('only_gated_aux', False, 'if True update only anet with gated_aux')
tf.flags.DEFINE_boolean('only_gated_aux_primary', False, 'if True update only anet with gated aux and with primary')
tf.flags.DEFINE_integer("inner_steps", 1, "Number of inner loop steps")
tf.flags.DEFINE_boolean('val_loss', False, 'if True calculate loss instead of accuracy for model selection')
tf.flags.DEFINE_boolean('undo_anet', False, 'if True reinitialize anet')
tf.flags.DEFINE_integer("primary_update_interval", 1, "Interval between primary updates")
tf.flags.DEFINE_boolean('set_gate', False, 'if True set the gate values')
tf.flags.DEFINE_integer("n_outer_batches", 1, "number of outer batches for eval")
tf.flags.DEFINE_boolean('separate_eval_inner', False, 'if True split eval data from primary for inner')
tf.flags.DEFINE_boolean('separate_eval_outer', False, 'if True split eval data from primary for outer')
tf.flags.DEFINE_boolean('analysis_mode', False, 'if True print intermediate values')
tf.flags.DEFINE_float("r_gated_opt_lr", 0.001, "Learning rate for the aux update.")
tf.flags.DEFINE_float("anet_l2_coeff", 0, "coeff to anet l2 loss")
tf.flags.DEFINE_float("gqnet_l2_coeff", 0, "coeff to gqnet l2 loss")
tf.flags.DEFINE_boolean('set_aux_gate_vec', False, 'if True get aux_gate')
tf.flags.DEFINE_integer("split_ratio", 2, "how to split the primary data for training")
tf.flags.DEFINE_float("gated_outer_opt_lr", 0.001, "Learning rate for gqnet Optimizer that updates gqnet")
tf.flags.DEFINE_boolean('l2_bias', False, 'if True add l2 loss to bias term in gqnet')
tf.flags.DEFINE_boolean('joint_gated_qnet', False, 'if True, jointly train gated qnet and anet')
tf.flags.DEFINE_integer("n_gqnet_update", 1, "how many times to update gqnet before updating anet")
tf.flags.DEFINE_boolean('print_bias', False, "if True, print gqnet's bias term")
tf.flags.DEFINE_boolean('r', True, 'if False, no orig bAbI as related task')
tf.flags.DEFINE_float("primary_coeff", 1.0, "sclaing the primary loss")
tf.flags.DEFINE_boolean('stop_grad_adam', False, 'if True stop grad in adam optimizer simulation')
tf.flags.DEFINE_boolean('single_batch', False, 'if True use single batch of primary, related for training')
tf.flags.DEFINE_boolean('inner_primary', True, 'if true use primary in the inner loop')
tf.flags.DEFINE_boolean('inner_sgd', False, 'if true use sgd in the inner loop')
tf.flags.DEFINE_boolean('subtract_pre_outer', False, 'if true subtract pre from outer')
tf.flags.DEFINE_boolean('new', True, 'if false use old babi as the main task')
tf.flags.DEFINE_boolean('small_candidates', False, 'if True, use small candidate list')
tf.flags.DEFINE_boolean('only_bad', False, 'if True using only bad data for related tasks')
tf.flags.DEFINE_boolean('only_gqnet_inside', False, 'if True update only gqnet inside joint_gated_qnet')
tf.flags.DEFINE_integer("n_aux_update", 1, "how many times to update anet with aux before doing primary")
tf.flags.DEFINE_float("gate_threshold", 0.5, "thresholding for set_aux_gate_vec")
tf.flags.DEFINE_boolean('bad_and_good', False, 'if True both good and bad data in r1')
tf.flags.DEFINE_boolean('only_good', False, 'if True remove bad from r1')
tf.flags.DEFINE_boolean('only_gated_related', False, 'if True, under primary_and_related only do related (skip primary updates)')
tf.flags.DEFINE_boolean('use_answer', False, 'if True use answer in gated')
tf.flags.DEFINE_boolean('no_r1', False, "if True, don't use r1")
tf.flags.DEFINE_boolean('rand_aux_gate', False, 'if True use random aux gate')
tf.flags.DEFINE_boolean('changing_rand_aux_gate', False, 'if True use changing random aux gate')
FLAGS = tf.flags.FLAGS
print("Started Task :)) :", FLAGS.task_id)
class chatBot(object):
def __init__(self, data_dir, r_data_dir, model_dir, result_dir, task_id, r_task_id,
OOV=False,
has_qnet =False,
memory_size=250,
random_state=None,
batch_size=32,
learning_rate=0.001,
epsilon=1e-8,
alpha=0.9,
max_grad_norm=0.5,
evaluation_interval=10,
hops=3,
epochs=200,
embedding_size=20,
save_vocab=False,
load_vocab=False,
alternate=True,
only_aux=False,
aux_opt='adam',
aux_learning_rate=0.001,
outer_learning_rate=0.001,
only_primary=False,
aux_nonlin=None,
m_series=False,
only_related=False,
transform_qnet=False,
transform_anet=False,
primary_and_related=False,
gated_qnet=False,
outer_r_weight=0):
"""Creates wrapper for training and testing a chatbot model.
Args:
data_dir: Directory containing personalized dialog tasks.
r_data_dir: Directory containing related task's data
model_dir: Directory containing memn2n model checkpoints.
aux_opt: Optimizer for updating anet using aux loss.
task_id: Personalized dialog task id, 1 <= id <= 5. Defaults to `1`.
r_task_id: Related tasks task id.
OOV: If `True`, use OOV test set. Defaults to `False`
has_qnet: If True, add question network
memory_size: The max size of the memory. Defaults to `250`.
random_state: Random state to set graph-level random seed. Defaults to `None`.
batch_size: Size of the batch for training. Defaults to `32`.
learning_rate: Learning rate for Adam Optimizer. Defaults to `0.001`.
epsilon: Epsilon value for Adam Optimizer. Defaults to `1e-8`.
alpha: Decay rate of the RMSProp optimizer.
max_grad_norm: Maximum L2 norm clipping value. Defaults to `0.5`.
evaluation_interval: Evaluate and print results every x epochs.
Defaults to `10`.
hops: The number of hops over memory for responding. A hop consists
of reading and addressing a memory slot. Defaults to `3`.
epochs: Number of training epochs. Defaults to `200`.
embedding_size: The size of the word embedding. Defaults to `20`.
save_vocab: If `True`, save vocabulary file. Defaults to `False`.
load_vocab: If `True`, load vocabulary from file. Defaults to `False`.
alternate: If `True`, alternate between primary and related every epoch.
only_aux: Update anet using only the aux loss and update qnet.
aux_learning_rate: Learning rate of the aux update to anet.
outer_learning_rate: Learning rate for updating qnet.
only_primary: Train on only primary data.
aux_nonlin: Nonlinearity at the end of aux prediction/target.
m_series: If `True`, enable m_series mode.
only_related: If `True`, train qnet with related task data.
"""
self.data_dir = data_dir
self.r_data_dir = r_data_dir
self.task_id = task_id
self.r_task_id = r_task_id
self.model_dir = model_dir
self.result_dir = result_dir
self.OOV = OOV
self.has_qnet = has_qnet
self.memory_size = memory_size
self.random_state = random_state
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.max_grad_norm = max_grad_norm
self.evaluation_interval = evaluation_interval
self.hops = hops
self.epochs = epochs
self.embedding_size = embedding_size
self.save_vocab = save_vocab
self.load_vocab = load_vocab
self.alternate = alternate
self.only_aux = only_aux
self.aux_opt = aux_opt
self.aux_learning_rate = aux_learning_rate
self.outer_learning_rate = outer_learning_rate
self.only_primary = only_primary
self.aux_nonlin = aux_nonlin
self.m_series = m_series
self.only_related = only_related
self.transform_qnet = transform_qnet
self.transform_anet = transform_anet
self.primary_and_related = primary_and_related
self.gated_qnet = gated_qnet
self.outer_r_weight = outer_r_weight
candidates,self.candid2indx = load_candidates(self.data_dir, self.task_id, True, FLAGS.r1, FLAGS.new, FLAGS.small_candidates)
self.n_cand = len(candidates)
print("Candidate Size", self.n_cand)
self.indx2candid = dict((self.candid2indx[key],key)
for key in self.candid2indx)
if self.has_qnet:
r_candidates, self.r_candid2indx = load_candidates(self.r_data_dir, self.task_id, False, FLAGS.r1, FLAGS.new, FLAGS.small_candidates, FLAGS.bad_and_good)
self.r_n_cand = len(r_candidates)
print("R Candidate Size", self.r_n_cand)
self.r_indx2candid = dict((self.r_candid2indx[key], key)
for key in self.r_candid2indx)
# Task data
if FLAGS.new:
self.trainData, self.testData, self.valData = load_dialog_task(
self.data_dir, self.task_id, self.candid2indx, self.OOV)
else:
self.trainData, self.testData, self.valData = r_load_dialog_task(
self.data_dir, self.task_id, self.candid2indx, self.OOV)
data = self.trainData + self.testData + self.valData
if self.has_qnet:
if FLAGS.small_candidates:
only_train=True
else:
only_train = False
self.r_trainData, self.r_testData, self.r_valData = r_load_dialog_task(
self.r_data_dir, self.r_task_id, self.r_candid2indx, self.OOV, only_train = only_train)
if FLAGS.small_candidates:
data = data + self.r_trainData
else:
data = data + self.r_trainData + self.r_valData + self.r_testData
if FLAGS.r1 and not FLAGS.no_r1:
if FLAGS.new:
self.r1_trainData, _, _ = load_dialog_task(
FLAGS.r1_data_dir, self.r_task_id, self.r_candid2indx, self.OOV, only_train = only_train)
else:
self.r1_trainData, _, _ = r_load_dialog_task(
FLAGS.r1_data_dir, self.r_task_id, self.r_candid2indx, self.OOV, only_train = only_train)
data = data + self.r1_trainData
if self.has_qnet:
self.build_vocab(data,candidates,self.save_vocab,self.load_vocab, r_candidates)
self.indx2word = dict((self.word_idx[key],key)
for key in self.word_idx)
self.candidates_vec = vectorize_candidates(
candidates,self.word_idx,self.candidate_sentence_size)
if self.has_qnet:
self.r_candidates_vec = vectorize_candidates(
r_candidates,self.word_idx,self.r_candidate_sentence_size)
else:
self.r_candidates_vec = None
if FLAGS.sep_test:
_, self.sep_testData, _ = load_dialog_task(
FLAGS.test_data_dir, self.task_id, self.candid2indx, self.OOV)
optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate, epsilon=self.epsilon, name='opt')
if self.aux_opt == 'sgd':
aux_optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.aux_learning_rate, name='aux_opt')
elif self.aux_opt == 'adam':
aux_optimizer = tf.train.AdamOptimizer(learning_rate=self.aux_learning_rate, epsilon=self.epsilon, name='aux_opt')
elif self.aux_opt == 'rms':
aux_optimizer = tf.train.RMSPropOptimizer(learning_rate=self.aux_learning_rate, decay=self.alpha, epsilon=self.epsilon, name='aux_opt')
else:
print("unknown aux optimizer")
outer_optimizer = tf.train.AdamOptimizer(
learning_rate=self.outer_learning_rate, epsilon=self.epsilon, name='outer_opt')
self.sess = tf.Session(config=config)
self.model = MemN2NDialog(self.has_qnet, self.batch_size, self.vocab_size, self.n_cand,
self.sentence_size, self.embedding_size,
self.candidates_vec, self.candidate_sentence_size, session=self.sess,
hops=self.hops, max_grad_norm=self.max_grad_norm,
optimizer=optimizer, outer_optimizer=outer_optimizer, aux_optimizer=aux_optimizer, task_id=task_id,
inner_lr=self.aux_learning_rate, aux_opt_name=self.aux_opt, alpha=self.alpha,
epsilon=self.epsilon, aux_nonlin=self.aux_nonlin, m_series=self.m_series,
r_candidates_vec=self.r_candidates_vec, outer_r_weight=self.outer_r_weight,
qnet_hops = FLAGS.qnet_hops, gate_nonlin=FLAGS.gate_nonlin, inner_steps=FLAGS.inner_steps, r_candidates_size=self.r_n_cand,
set_gate=FLAGS.set_gate, r_gated_opt_lr=FLAGS.r_gated_opt_lr, anet_l2_coeff=FLAGS.anet_l2_coeff,
gqnet_l2_coeff=FLAGS.gqnet_l2_coeff, set_aux_gate_vec=FLAGS.set_aux_gate_vec, gated_outer_opt_lr=FLAGS.gated_outer_opt_lr,
l2_bias=FLAGS.l2_bias, primary_coeff=FLAGS.primary_coeff, n_outer_batches=FLAGS.n_outer_batches, stop_grad_adam=FLAGS.stop_grad_adam,
inner_primary=FLAGS.inner_primary, inner_sgd=FLAGS.inner_sgd, subtract_pre_outer=FLAGS.subtract_pre_outer, use_answer=FLAGS.use_answer)
self.saver = tf.train.Saver(max_to_keep=50)
self.summary_writer = tf.summary.FileWriter(
self.result_dir, self.model.graph_output.graph)
def build_vocab(self,data,candidates,save=False,load=False, r_candidates=None):
"""Build vocabulary of words from all dialog data and candidates."""
if load:
# Load from vocabulary file
vocab_file = open('vocab.obj', 'rb')
vocab = pickle.load(vocab_file)
else:
if self.has_qnet and not self.m_series:
vocab = reduce(lambda x, y: x | y,
(set(list(chain.from_iterable(s)) + q + q_a)
for s, q, a, q_a in data))
else:
vocab = reduce(lambda x, y: x | y,
(set(list(chain.from_iterable(s)) + q)
for s, q, a, q_a in data))
vocab |= reduce(lambda x,y: x|y,
(set(candidate) for candidate in candidates) )
if self.has_qnet and self.m_series:
vocab |= reduce(lambda x, y: x | y,
(set(r_candidate) for r_candidate in r_candidates))
vocab = sorted(vocab)
self.word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _, _ in data)))
mean_story_size = int(np.mean([ len(s) for s, _, _, _ in data ]))
self.sentence_size = max(map(len, chain.from_iterable(s for s, _, _, _ in data)))
self.candidate_sentence_size=max(map(len,candidates))
if self.has_qnet:
self.r_candidate_sentence_size=max(map(len,r_candidates))
query_size = max(map(len, (q for _, q, _, _ in data)))
q_answer_size = max(map(len, (q_a for _, _, _, q_a in data)))
self.memory_size = min(self.memory_size, max_story_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for nil word
if self.has_qnet and not self.m_series:
self.sentence_size = max(query_size, self.sentence_size, q_answer_size) # for the position
else:
self.sentence_size = max(query_size, self.sentence_size) # for the position
# Print parameters
print("vocab size:", self.vocab_size)
print("Longest sentence length", self.sentence_size)
print("Longest candidate sentence length", self.candidate_sentence_size)
if self.has_qnet and self.m_series:
print("Longest r_candidate sentence length", self.r_candidate_sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
# Save to vocabulary file
if save:
vocab_file = open('vocab.obj', 'wb')
pickle.dump(vocab, vocab_file)
def train(self):
"""Runs the training algorithm over training set data.
Performs validation at given evaluation intervals.
"""
if FLAGS.restore:
model_dir = 'model/' + str(FLAGS.task_id) + '/' + FLAGS.restore_model_dir
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
print("restore_path", ckpt.model_checkpoint_path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Restored checkpoint")
else:
print("...no checkpoint found...")
sys.exit(1)
trainS, trainQ, trainA, trainqA = vectorize_data(
self.trainData, self.word_idx, self.sentence_size, self.candidate_sentence_size,
self.batch_size, self.n_cand, self.memory_size)
if self.has_qnet:
r_trainS, r_trainQ, r_trainA, r_trainqA = vectorize_data(
self.r_trainData, self.word_idx, self.sentence_size, self.r_candidate_sentence_size,
self.batch_size, self.r_n_cand, self.memory_size)
n_r_train = len(r_trainS)
print("Related Task Trainign Size", n_r_train)
if FLAGS.r1 and not FLAGS.no_r1:
r1_trainS, r1_trainQ, r1_trainA, r1_trainqA = vectorize_data(
self.r1_trainData, self.word_idx, self.sentence_size, self.r_candidate_sentence_size,
self.batch_size, self.r_n_cand, self.memory_size)
n_r1_train = len(r1_trainS)
print("Second Related Task Training Size", n_r1_train)
# A_qA = list(zip(r1_trainA, r1_trainqA))
# np.random.seed(0)
# np.random.shuffle(A_qA)
# r1_trainA, r1_trainqA = zip(*A_qA)
# print(len(r1_trainA), r1_trainA)
np.random.seed(0)
if FLAGS.bad_and_good:
# r1_trainA_bad = np.random.randint(low=0, high=self.r_n_cand, size=int(2*len(r1_trainA)/3))
# r1_trainA = list(r1_trainA_bad) + list(r1_trainA[len(r1_trainA_bad):])
# n_r1_bad = len(r1_trainA_bad)
# A_qA = list(zip(r1_trainA[:int(2*len(r1_trainA)/3)], r1_trainqA[int(2*len(r1_trainqA)/3)]))
np.random.seed(0)
# np.random.shuffle(A_qA)
# r1_trainA_bad, r1_trainqA_bad = zip(*A_qA)
r1_trainA_bad, r1_trainqA_bad = sklearn.utils.shuffle(r1_trainA, r1_trainqA)
n_r1_bad = int(2*len(r1_trainA)/3)
r1_trainA = list(r1_trainA_bad[:n_r1_bad]) + list(r1_trainA[n_r1_bad:])
r1_trainqA = r1_trainqA_bad[:n_r1_bad] + r1_trainqA[n_r1_bad:]
print(np.asarray(r1_trainA).shape, np.asarray(r1_trainqA).shape)
else:
r1_trainA = np.random.randint(low=0, high=self.r_n_cand, size=len(r1_trainA))
# A_qA = list(zip(r1_trainA, r1_trainqA))
# np.random.seed(0)
# np.random.shuffle(A_qA)
# r1_trainA, r1_trainqA = zip(*A_qA)
# print(len(r1_trainA), r1_trainA)
if not FLAGS.no_r1:
if not FLAGS.only_good:
r_trainS = r_trainS[0:int(n_r_train/self.batch_size)*self.batch_size] + r1_trainS
r_trainQ = r_trainQ[0:int(n_r_train/self.batch_size)*self.batch_size] + r1_trainQ
r_trainA = r_trainA[0:int(n_r_train/self.batch_size)*self.batch_size] + list(r1_trainA)
r_trainqA = r_trainqA[0:int(n_r_train/self.batch_size)*self.batch_size] + r1_trainqA
else:
r_trainS = r_trainS[0:int(n_r_train / self.batch_size) * self.batch_size] + r1_trainS[n_r1_bad:]
r_trainQ = r_trainQ[0:int(n_r_train / self.batch_size) * self.batch_size] + r1_trainQ[n_r1_bad:]
r_trainA = r_trainA[0:int(n_r_train / self.batch_size) * self.batch_size] + list(r1_trainA[n_r1_bad:])
r_trainqA = r_trainqA[0:int(n_r_train / self.batch_size) * self.batch_size] + r1_trainqA[n_r1_bad:]
n_r1_train = len(r1_trainS[n_r1_bad:])
print("Second Related Task Training Size", n_r1_train)
else:
n_r1_train=1
# print("new", r_trainA)
n_r_orig_train = len(r_trainS[0:int(n_r_train/self.batch_size)*self.batch_size])
n_r_train = len(r_trainS)
print("joint related task training size", n_r_train)
else:
n_r_orig_train = len(r_trainS[0:int(n_r_train/self.batch_size)*self.batch_size])
n_r1_train = 1
valS, valQ, valA, _ = vectorize_data(
self.valData, self.word_idx, self.sentence_size, self.candidate_sentence_size,
self.batch_size, self.n_cand, self.memory_size)
if self.has_qnet and self.m_series and not FLAGS.small_candidates:
r_valS, r_valQ, r_valA, _ = vectorize_data(
self.r_valData, self.word_idx, self.sentence_size, self.r_candidate_sentence_size,
self.batch_size, self.r_n_cand, self.memory_size)
n_r_val = len(r_valS)
n_train = len(trainS)
n_val = len(valS)
print("Training Size", n_train)
print("Validation Size", n_val)
tf.set_random_seed(self.random_state)
batches = zip(range(0, n_train-self.batch_size, self.batch_size),
range(self.batch_size, n_train, self.batch_size))
batches = [(start, end) for start, end in batches]
if FLAGS.single_batch:
if FLAGS.only_bad:
batches = [(start, end) for start, end in batches[0:5]]
else:
batches = [(start, end) for start, end in batches[0:10]]
best_validation_accuracy=-1
best_validation_loss = np.inf
best_validation_epoch = 0
best_held_out_primary_acc = 0
best_held_out_primary_acc_epoch = 0
if self.has_qnet:
np.random.seed(0)
np.random.shuffle(batches)
if FLAGS.split_ratio == 3:
p_batches = batches[:int(2*len(batches)/3)]
r_batches_p = batches[int(2*len(batches)/3):]
elif FLAGS.split_ratio == 2:
p_batches = batches[:int(len(batches)/2)]
r_batches_p = batches[int(len(batches)/2):]
r_batches_r = zip(range(0, n_r_train-self.batch_size, self.batch_size),
range(self.batch_size, n_r_train, self.batch_size))
r_batches_r = [(start, end) for start, end in r_batches_r]
if not FLAGS.r:
r_batches_r = zip(range(n_r_orig_train, n_r_train - self.batch_size, self.batch_size),
range(n_r_orig_train + self.batch_size, n_r_train, self.batch_size))
r_batches_r = [(start, end) for start, end in r_batches_r]
if FLAGS.single_batch:
if FLAGS.only_bad:
r_batches_r = [(start, end) for start, end in r_batches_r[0:5]] # good data
n_r1_train = self.batch_size * 5
n_r_train = n_r1_train + 1
else:
r_batches_r = [(start, end) for start, end in r_batches_r[0:5]] # good data
r_batches_r_bad = zip(range(n_r_orig_train, n_r_train - self.batch_size, self.batch_size),
range(n_r_orig_train + self.batch_size, n_r_train, self.batch_size))
r_batches_r_bad = [(start, end) for start, end in r_batches_r_bad]
r_batches_r += [(start, end) for start, end in r_batches_r_bad[5:10]]
n_r1_train = self.batch_size * 5 # true only when r = False
n_r_train = n_r1_train + self.batch_size * 5
# for start, end in batches:
# print('batches', trainQ[start], trainA[start])
#
# for start, end in r_batches_r:
# print('r_batches', r_trainQ[start], r_trainA[start])
if FLAGS.rand_aux_gate:
rand_aux_gate = np.random.rand(n_r_train)
# Training loop
start_time = time.process_time()
if FLAGS.copy_qnet2anet:
self.model.copy_qnet2anet()
print("Qnet copied to anet")
if FLAGS.copy_qnet2gqnet:
self.model.copy_qnet2gqnet()
print("Qnet copied to gated qnet")
if FLAGS.undo_anet:
self.model.initialize_anet()
print("Anet reinitialized")
train_acc_ph = tf.placeholder(tf.float32, shape=(), name="train_acc_ph")
val_acc_ph = tf.placeholder(tf.float32, shape=(), name="val_acc_ph")
outer_train_acc_ph = tf.placeholder(tf.float32, shape=(), name="outer_train_acc_ph")
total_outer_cost_ph = tf.placeholder(tf.float32, shape=(), name="total_outer_cost_ph")
total_aux_cost_ph = tf.placeholder(tf.float32, shape=(), name="total_aux_cost_ph")
total_primary_cost_ph = tf.placeholder(tf.float32, shape=(), name="total_primary_cost_ph")
total_r_gated_loss_0_ph = tf.placeholder(tf.float32, shape=(), name="total_r_gated_loss_0_ph")
total_loss_0_ph = tf.placeholder(tf.float32, shape=(), name="total_loss_0_ph")
total_loss_temp_ph = tf.placeholder(tf.float32, shape=(), name="total_loss_temp_ph")
total_r_gated_loss_i_ph = tf.placeholder(tf.float32, shape=(), name="total_r_gated_loss_i_ph")
total_loss_i_ph = tf.placeholder(tf.float32, shape=(), name="total_loss_i_ph")
total_anet_l2_loss_ph = tf.placeholder(tf.float32, shape=(), name="total_anet_l2_loss_ph")
total_gated_qnet_l2_loss_ph = tf.placeholder(tf.float32, shape=(), name="total_gated_qnet_l2_loss_ph")
train_acc_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'train_acc',
train_acc_ph)
val_acc_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'val_acc',
val_acc_ph)
if FLAGS.r1 and FLAGS.joint_gated_qnet:
r1_ratio_ph = tf.placeholder(tf.float32, shape=(), name="r1_ratio_ph")
r1_ratio_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'r1_ratio',
r1_ratio_ph)
r_ratio_ph = tf.placeholder(tf.float32, shape=(), name="r_ratio_ph")
r_ratio_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'r_ratio',
r_ratio_ph)
avg_gated_outer_grads_norm_ph = tf.placeholder(tf.float32, shape=(), name="avg_gated_outer_grads_norm_ph")
avg_gated_outer_grads_norm_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'avg_gated_outer_grads_norm',
avg_gated_outer_grads_norm_ph)
# merged_summary = tf.summary.merge([train_acc_summary, val_acc_summary, r1_ratio_summary, r_ratio_summary])
total_outer_cost_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_outer_cost',
total_outer_cost_ph)
total_aux_cost_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_aux_cost',
total_aux_cost_ph)
total_primary_cost_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_primary_cost',
total_primary_cost_ph)
total_r_gated_loss_0_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_r_gated_loss_0',
total_r_gated_loss_0_ph)
total_loss_0_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_loss_0',
total_loss_0_ph)
total_loss_temp_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_loss_temp',
total_loss_temp_ph)
total_r_gated_loss_i_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_r_gated_loss_i',
total_r_gated_loss_i_ph)
total_loss_i_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_loss_i',
total_loss_i_ph)
total_anet_l2_loss_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_anet_l2_loss',
total_anet_l2_loss_ph)
total_gated_qnet_l2_loss_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'total_gated_qnet_l2_loss',
total_gated_qnet_l2_loss_ph)
if FLAGS.separate_eval or FLAGS.separate_eval_inner:
outer_train_acc_summary = tf.summary.scalar(
'task_' + str(self.task_id) + '/' + 'outer_train_acc',
outer_train_acc_ph)
merged_summary = tf.summary.merge(
[train_acc_summary, val_acc_summary, r1_ratio_summary, r_ratio_summary, outer_train_acc_summary, avg_gated_outer_grads_norm_summary,
total_outer_cost_summary, total_aux_cost_summary, total_primary_cost_summary, total_r_gated_loss_0_summary, total_loss_0_summary, total_loss_temp_summary,
total_r_gated_loss_i_summary, total_loss_i_summary, total_anet_l2_loss_summary, total_gated_qnet_l2_loss_summary])
else:
merged_summary = tf.summary.merge(
[train_acc_summary, val_acc_summary, r1_ratio_summary, r_ratio_summary, avg_gated_outer_grads_norm_summary,
total_outer_cost_summary, total_aux_cost_summary, total_primary_cost_summary, total_r_gated_loss_0_summary, total_loss_0_summary, total_loss_temp_summary,
total_r_gated_loss_i_summary, total_loss_i_summary, total_anet_l2_loss_summary, total_gated_qnet_l2_loss_summary])
else:
merged_summary = tf.summary.merge([train_acc_summary, val_acc_summary])
aux_gate_values_curr = np.zeros((n_r_train))
aux_gate_values = np.zeros((n_r_train))
aux_gate_count = np.zeros((n_r_train))
r_batches_r_fixed = copy.deepcopy(r_batches_r)
for t in range(1, self.epochs+1):
print('Epoch', t)
np.random.shuffle(batches)
total_cost = 0.0
if self.has_qnet:
np.random.shuffle(p_batches)
np.random.shuffle(r_batches_p)
np.random.shuffle(r_batches_r)
if self.only_aux:
count = 0
for r_start, r_end in r_batches_r:
count +=1
start, end = random.sample(batches, 1)[0]
r_s_p = trainS[start:end]
r_q_p = trainQ[start:end]
r_a_p = trainA[start:end]
r_q_a_p = trainqA[start:end]
r_s = r_trainS[r_start:r_end]
r_q = r_trainQ[r_start:r_end]
r_a = r_trainA[r_start:r_end]
r_q_a = r_trainqA[r_start:r_end]
outer_cost_t, aux_cost_t = self.model.q_batch_fit(r_s, r_q, r_a, r_q_a, r_s_p, r_q_p, r_a_p, False) # related
cost_t = outer_cost_t
if count%100 == 0:
print('outer_cost', outer_cost_t, 'aux_cost', aux_cost_t)
total_cost += cost_t
elif self.only_primary:
if FLAGS.separate_eval:
for start, end in p_batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
# q_a = trainqA[start:end]
cost_t = self.model.batch_fit(s, q, a)
total_cost += cost_t
else:
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
# q_a = trainqA[start:end]
cost_t = self.model.batch_fit(s, q, a)
total_cost += cost_t
elif self.only_related:
for start, end in r_batches_r:
s = r_trainS[start:end]
q = r_trainQ[start:end]
a = r_trainA[start:end]
# q_a = trainqA[start:end]
cost_t = self.model.q_batch_fit_r(s, q, a)
total_cost += cost_t
elif self.transform_qnet:
for start, end in batches:
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
# q_a = trainqA[start:end]
cost_t = self.model.batch_fit_qt(s, q, a)
total_cost += cost_t
elif self.transform_anet:
for start, end in r_batches_r:
s = r_trainS[start:end]
q = r_trainQ[start:end]
a = r_trainA[start:end]
# q_a = trainqA[start:end]
cost_t = self.model.batch_fit_at(s, q, a)
total_cost += cost_t
elif self.primary_and_related:
count = 0
for r_start, r_end in r_batches_r:
s = r_trainS[r_start:r_end]
q = r_trainQ[r_start:r_end]
a = r_trainA[r_start:r_end]
# q_a = trainqA[start:end]
cost_t_related = self.model.batch_fit(s, q, a, primary=False)
if not FLAGS.only_gated_related:
if FLAGS.separate_eval:
start, end = random.sample(p_batches, 1)[0]
else:
start, end = random.sample(batches, 1)[0]
# start, end = random.sample(batches, 1)[0]
s = trainS[start:end]
q = trainQ[start:end]
a = trainA[start:end]
# q_a = trainqA[start:end]
cost_t_primary = self.model.batch_fit(s, q, a)
else:
cost_t_primary = 0
if count % 100 == 0:
print("related", cost_t_related, "primary", cost_t_primary)
total_cost += cost_t_related + cost_t_primary
count += 1
elif FLAGS.only_gated_qnet:
if FLAGS.inner_steps == 1:
count = 0
gate_r1 = 0
gate_joint_r = 0
for r_start, r_end in r_batches_r:
count += 1
if FLAGS.separate_eval:
start, end = random.sample(r_batches_p, 1)[0]
else:
start, end = random.sample(batches, 1)[0]
r_s_p = trainS[start:end]
r_q_p = trainQ[start:end]
r_a_p = trainA[start:end]
r_q_a_p = trainqA[start:end]
r_s = r_trainS[r_start:r_end]
r_q = r_trainQ[r_start:r_end]
r_a = r_trainA[r_start:r_end]
r_q_a = r_trainqA[r_start:r_end]
cost_t_outer, aux_gate = self.model.gated_q_batch_fit(r_s, r_q, r_a, r_q_a, r_s_p, r_q_p, r_a_p) #gated qnet update
total_cost += cost_t_outer
if r_start >= n_r_orig_train:
gate_r1 += np.sum(aux_gate)
else:
gate_joint_r += np.sum(aux_gate)
if count % 100 == 0:
print("count", count, "outer", cost_t_outer)
print("Ratio of gate_r1/r1: ", gate_r1/n_r1_train, "Ratio of gate_joint_r/joint_r", gate_joint_r/(n_r_train-n_r1_train))
else:
count = 0
gate_r1 = 0
gate_joint_r = 0
total_epoch_cost = 0
r_s_list = []
r_q_list = []
r_a_list = []
r_q_a_list = []
w_gated_q_B_list = []
s_p_list = []
q_p_list = []
a_p_list = []
q_a_p_list = []
# r_s_list1 = np.zeros((FLAGS.inner_steps, ))
for r_start, r_end in r_batches_r:
count += 1
r_s = r_trainS[r_start:r_end]
r_q = r_trainQ[r_start:r_end]
r_a = r_trainA[r_start:r_end]
r_q_a = r_trainqA[r_start:r_end]
aux_gate = self.model.get_aux_gate(r_s, r_q)
if r_start >= n_r_orig_train:
gate_r1 += np.sum(aux_gate)
else:
gate_joint_r += np.sum(aux_gate)
r_s_list.append(r_s)
# print(np.array(r_s).shape)
r_q_list.append(r_q)
r_a_list.append(r_a)
r_q_a_list.append(r_q_a)
if FLAGS.set_gate:
if r_start >= n_r_orig_train:
w_gated_q_B = np.ones((1)) * -100000
else:
w_gated_q_B = np.ones((1)) * 100000
w_gated_q_B_list.append(w_gated_q_B)
if FLAGS.separate_eval_inner:
start_p, end_p = random.sample(p_batches, 1)[0]
else:
start_p, end_p = random.sample(batches, 1)[0]
s_p = trainS[start_p:end_p]
q_p = trainQ[start_p:end_p]
a_p = trainA[start_p:end_p]
q_a_p = trainqA[start_p:end_p]
s_p_list.append(s_p)
q_p_list.append(q_p)
a_p_list.append(a_p)
q_a_p_list.append(q_a_p)
if count % FLAGS.inner_steps == 0:
if FLAGS.separate_eval_outer:
start, end = random.sample(r_batches_p, 1)[0]
else:
start, end = random.sample(batches, 1)[0]
r_s_p = trainS[start:end]
r_q_p = trainQ[start:end]
r_a_p = np.asarray(trainA[start:end])
r_q_a_p = trainqA[start:end]
#used if outer_r_weight > 0
r_start1, r_end1 = random.sample(r_batches_r, 1)[0]
r_s1 = r_trainS[r_start1:r_end1]
r_q1 = r_trainQ[r_start1:r_end1]
r_a1 = np.asarray(r_trainA[r_start1:r_end1])
r_q_a1 = r_trainqA[r_start1:r_end1]
for j in range(FLAGS.n_outer_batches-1):
if FLAGS.separate_eval:
start, end = random.sample(r_batches_p, 1)[0]
else:
start, end = random.sample(batches, 1)[0]
r_s_p = np.vstack((r_s_p, trainS[start:end]))
import numpy as np
import torch
import gym
import argparse
import os
from collections import deque
import utils
import TD3
import OurDDPG
import DDPG
import TD3_ad
import robosuite as suite
from torch.utils.tensorboard import SummaryWriter
import time
import multiprocessing as mp
from functools import partial
def createstate(state):
all_states = np.array([])
for key in state.keys():
all_states = np.concatenate((all_states, state[key]))
return all_states
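# Example (hypothetical): flattening a robosuite observation dict.
# Note that the concatenation order follows dict insertion order:
#   obs = {'robot-state': np.zeros(32), 'object-state': np.zeros(10)}
#   flat = createstate(obs)  # -> 1-D array of length 42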
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=2):
eval_env = suite.make(
env_name,
has_renderer=False, # no on-screen renderer
has_offscreen_renderer=False, # no off-screen renderer
use_object_obs=True, # use object-centric feature
use_camera_obs=False, # no camera
reward_shaping=True,
)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
for step in range(200):
state = createstate(state)
action = policy.select_action(np.array(state))
"""
Frequency band information for different types of data processing.
"""
import numpy as np
# Chang lab frequencies
fq_min = 4.0749286538265
fq_max = 200.
scale = 7.
cfs = 2 ** (np.arange(np.log2(fq_min) * scale, np.log2(fq_max) * scale) / scale)
cfs = np.array(cfs)
sds = 10 ** ( np.log10(.39) + .5 * (np.log10(cfs)))
sds = np.array(sds) * np.sqrt(2.)
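# Example (assumed usage): inspecting the resulting filter bank --
#   for cf, sd in zip(cfs, sds):
#       print(f"center {cf:7.2f} Hz, sd {sd:6.2f} Hz")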
from keras.optimizers import SGD
# import h5py
import cv2
from face_network import create_face_network
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
train_split = 0.7
# Folder path
PATH = "D:\\Users\\mguludag\\Desktop\\staj_proj\\bolgeler"
FILE_FORMAT = (".png", ".jpg")
# Map a sample to its image id (here, simply the containing directory name)
def getImageId(name):
return name
images = []
imagesResized = []
region = []
for subdir, dirs, files in os.walk(PATH):
for file in files:
if file.endswith(FILE_FORMAT):
name = os.path.join(subdir, file)
im = cv2.imread(name, cv2.IMREAD_COLOR)
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
# im.show()
images.append(np.array(im))
im = cv2.resize(im, (224, 224))
imagesResized.append(np.array(im))
imageId = getImageId(os.path.basename(subdir))
if(imageId=="akdeniz"):
region.append(0)
if(imageId=="ege"):
region.append(1)
if(imageId=="ic_anadolu"):
region.append(2)
if(imageId=="karadeniz"):
region.append(3)
# cv2.imshow("sfsf",im)
# cv2.waitKey(0)
# Concatenate
# images = np.float64(np.stack(images))
# print(images.shape)
imagesResized = np.float64(np.stack(imagesResized))
region = np.stack(region)
# Normalize data
# images /= 255.0
imagesResized /= 255.0
# f = h5py.File('images.h5', 'r')
X_data = imagesResized
y_data = region
#One-hot
y_data = to_categorical(y_data, 4)
# Split into training and validation sets
num_images = len(y_data)
p = np.random.permutation(num_images)
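# A minimal continuation sketch (assumed, not part of the original file):
# apply the permutation, then split according to `train_split` --
#   split = int(train_split * num_images)
#   X_train, y_train = X_data[p[:split]], y_data[p[:split]]
#   X_test, y_test = X_data[p[split:]], y_data[p[split:]]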
import os, sys
from math import sqrt, copysign
import pandas as pd
import numpy as np
import helpers as nhp
from helpers import rotmat_dict, rotmats
from LatticeModel import LatticeModel
from cached_property import cached_property
import random
from itertools import combinations
import plotly as py
import plotly.graph_objs as go
from Bio.PDB import PDBParser
from Bio.PDB.QCPSuperimposer import QCPSuperimposer
pdb_parser = PDBParser()
imposer = QCPSuperimposer()
neighbor_mods = np.array([
[2, 2, 2],[-2, -2, -2],
[-2, 2, 2],[2, -2, 2],[2, 2, -2],
[-2, -2, 2],[-2, 2, -2],[2, -2, -2]
])
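# The eight vectors above are the body-diagonal moves of a body-centered
# cubic (BCC) lattice; cubic_neighbor_mods below adds the six axis-aligned
# face moves.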
cubic_neighbor_mods = np.array([
[0,0,4], [0,4,0], [4,0,0],
[0,0,-4],[0,-4,0], [-4,0,0],
])
neighbor_mods_d2 = np.unique(np.vstack([nm + neighbor_mods for nm in neighbor_mods]), axis=0)
neighbor_mods2 = np.vstack((neighbor_mods, neighbor_mods_d2))
mod2mod_dict = {nmi: np.argwhere(nhp.inNd(neighbor_mods2, nm1 * 2))[0,0] for nmi, nm1 in enumerate(neighbor_mods)}
tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in neighbor_mods]
# test: allow cubic paths for tags
cubic_tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in cubic_neighbor_mods]
tag_mods_single.extend(cubic_tag_mods_single)
tag_mods_bulk = []
for tm in tag_mods_single:
tmb = np.unique(np.vstack([tms + neighbor_mods2 for tmi, tms in enumerate(tm) if tmi > 1]), axis=0)
tmb = tmb[np.invert(nhp.inNd(tmb, tm))]
tag_mods_bulk.append(tmb)
tag_mods = list(zip(tag_mods_single, tag_mods_bulk))
quad_neighbor_mods_abs = np.array([
[0, 0, 4],
[0, 4, 0],
[4, 0, 0]
])
helix_array = np.array([[0, 0, 0],
[2, -2, 2],
[4, 0, 4],
[2, 2, 6],
[0, 0, 8]])
rotated_helix_array_list = [np.matmul(helix_array, rot) for rot in rotmats]
# mirror_dims = list(combinations([0,1,2], 2)) + [tuple([i]) for i in range(3)] + [(0, 1, 2)]
# mirrored_rotated_helix_array_list = [rhm for rhm in rotated_helix_array_list]
# helix_mod = np.array([[2, -2, 2],
# [2, 2, 2],
# [-2, 2, 2],
# [-2, -2, 2]])
# helix with equidistant neighbors
helix_v_truth = np.array([[6, 2, -2, 2],
[-6, -2, 2, -2]])
helix_h_truth = np.array([0, 0, 8])
# helix with 1 quad face transition
# helix_h_truth = np.array([0, 0, 6])
#
# helix_v_truth = np.array([[6, 0, -2, 2],
# [-6, 0, 2, -2]])
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
from: https://stackoverflow.com/questions/45142959/calculate-rotation-matrix-to-align-two-vectors-in-3d-space
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
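# Example (hypothetical): aligning the +z axis with a body diagonal --
#   R = rotation_matrix_from_vectors(np.array([0., 0., 1.]), np.array([1., 1., 1.]))
#   R @ np.array([0., 0., 1.])  # ~ [1, 1, 1] / sqrt(3)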
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Lattice(LatticeModel):
"""Class containing all that pertains to a particular type of lattice (initialization, allowed moves etc.)
lattice type: body-centered cubic (bcc)
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pdb_id = kwargs.get('pdb_id', 'unknown')
self.experimental_mode = kwargs['experimental_mode']
self.no_regularization = kwargs.get('no_regularization', False)
self.ca_dist = 3.8 # actual CA distance
self.lat_dist = sqrt((0.5 * self.ca_dist) ** 2 / 3) # distance of lattice edge
self.linker_dist = 21 # Distance tagged CA to dye
self.linker_dist_lat = sqrt(self.linker_dist ** 2 / 3)
self.n1_dist = 1.48 # estimate of distance N to CA
self.pairs_mat = kwargs['pairs_mat']
self.ss_df = kwargs['secondary_structure']
# self.sheet_series = nhp.list_sheet_series(self.ss_sequence)
self.coords = kwargs.get('coords', None)
self.prev_coords = self.coords.copy()
self.branch_rotation_idx_list = list(range(len(rotmats)))
self.cm_coords = kwargs.get('cm_coords', None)
self.finetune_structure = kwargs.get('finetune_structure', False)
def coords_are_valid(self):
"""
For testing purposes!
"""
for i, c in enumerate(self.coords[:-1]):
if not np.all(np.abs(self.coords[i+1] - c) == 2): return False
return True
@property
def cm_coords(self):
return self._cm_coords
@cm_coords.setter
def cm_coords(self, coords):
"""
Translate cm coords to unit lattice
"""
if coords is None:
self._cm_coords = None
return
self._cm_coords = (coords - coords[0]) / self.lat_dist
@cached_property
def sheet_block_dict(self):
out_dict = {}
cur_block_idx = 0
in_block = False
for si, ss in enumerate(self.ss_sequence):
if ss == 'S':
if not in_block:
cur_block_idx += 1
in_block = True
out_dict[si] = cur_block_idx
else:
in_block = False
return out_dict
@property
def ss_df(self):
return self._ss_df
@ss_df.setter
def ss_df(self, df):
df.loc[self.tagged_resi, :] = 00, 4, 4
df.loc[:, 'L'] = 0
df[df > 0] = 0
self._ss_df = df
# --- mutations ---
def apply_n_steps(self, n):
global_fun_list = [
# self.apply_crankshaft_move,
self.apply_branch_rotation,
self.apply_corner_flip,
# self.apply_pull_move # screws up helices, can't get it right
]
for _ in range(n):
random.shuffle(global_fun_list)
if global_fun_list[0](): pass
elif global_fun_list[1](): pass
# elif global_fun_list[2](): pass
# elif global_fun_list[3](): pass
else: return False
self.set_hash_list()
self.__dict__.pop('e_matrix', None)
return True
def check_helicity(self):
# test: see if helices are still in place
for ci, ss in self.ss_df.iterrows():
if ss.H >= 0: continue
helix_candidate = self.coords[ci:ci + 5] - self.coords[ci]
hel_dists = [np.linalg.norm(helix_candidate - hel) for hel in rotated_helix_array_list]
if not np.any(np.array(hel_dists) == 0):
return ci
@property
def branch_rotation_idx_list(self):
random.shuffle(self._branch_rotation_idx_list)
return self._branch_rotation_idx_list
@branch_rotation_idx_list.setter
def branch_rotation_idx_list(self, bri_list):
self._branch_rotation_idx_list = bri_list
def apply_branch_rotation(self):
mutations = list(range(-3, 4))
mutations.remove(0)
random.shuffle(mutations)
idx_list = list(range(self.seq_length - 1))
idx_list = np.array(idx_list)[self.ss_sequence[:-1] != 'H']
random.shuffle(idx_list) # randomize positions to check
for ci in idx_list: # omit last position, where rotation does not make sense
for mi in self.branch_rotation_idx_list:
candidate = self.branch_rotation(self._coords[ci + 1:, :], self._coords[ci, :], mi)
if not np.any(nhp.inNd(candidate, self.coords[:ci, :])):
self._coords[ci + 1:, :] = candidate
return True
# candidate[ci + 1:, :] = self.branch_rotation(self._coords[ci + 1:, :], self._coords[ci, :], mut)
# if self.is_valid_candidate(candidate):
# self.coords = candidate
# return True
return False
def apply_pull_move(self):
direction = [-1, 1]
random.shuffle(direction)
idx_list = list(range(2, self.seq_length - 2))
idx_list = np.array(idx_list)[self.ss_sequence[2:-2] != 'H']
random.shuffle(idx_list) # randomize positions to check
candidate_found = False
for ri in idx_list:
for dir in direction:
if self.ss_sequence[ri + dir] == 'H' or self.ss_sequence[ri + dir * 2] == 'H': continue
# Candidates for first moved atom should be
l0_candidates = self.coords[ri + dir] + neighbor_mods_d2 # reachable from their old pos by 2 steps
l0_candidates = l0_candidates[nhp.inNd(l0_candidates, self.coords[ri] + neighbor_mods)] # adjacent to non-moved atom
l0_candidates = l0_candidates[np.invert(nhp.inNd(l0_candidates, self.coords))] # unoccupied
if not len(l0_candidates): continue
np.random.shuffle(l0_candidates)
for l0 in l0_candidates:
# Candidates for second moved atom should be...
l1_candidates = self.coords[ri + dir * 2] + neighbor_mods_d2 # reachable from their old pos by 2 steps
l1_candidates = l1_candidates[nhp.inNd(l1_candidates, l0 + neighbor_mods)] # adjacent to new l0 coord
if not len(l1_candidates): continue
l1_candidates = l1_candidates[np.invert(self.inNd(l1_candidates))] # unoccupied
if not len(l1_candidates): continue
l0_idx = ri + dir
d2_pos = l1_candidates[np.random.randint(len(l1_candidates))]
# Get position for third moved atom: between new d2 position and old d2 position
d1_candidates = d2_pos + neighbor_mods
d1_pos = d1_candidates[nhp.inNd(d1_candidates, self.coords[ri + dir * 2] + neighbor_mods)][0]
if self.inNd(d1_pos)[0]: continue
self._coords[ri + dir] = l0
change_idx = np.arange(ri + 2, self.seq_length) if dir == 1 else np.arange(ri-1)[::-1]
candidate_found = True
break
if candidate_found: break
if candidate_found: break
if not candidate_found:
return False
# Fill in positions
prev_c = l0_idx
first_H = True
for c in change_idx:
if self.ss_sequence[c] != 'H' and np.all(np.abs(self.coords[c] - self.coords[prev_c]) == 2):
break
if self.ss_sequence[c] == 'H':
if first_H:
helix_transl = self.coords[c] - d2_pos
self.coords[c] = d2_pos
first_H = False
else:
d2_pos = d1_pos
d1_pos = self.coords[c-1]
self.coords[c] = self.coords[c] + helix_transl
continue
else:
first_H = True
old_coord = self.coords[c].copy()
self.coords[c] = d2_pos
d2_pos = d1_pos
d1_pos = old_coord
prev_c = c
return True
def apply_corner_flip(self):
# Find idx of corners
diff1 = self.coords[1:] - self.coords[:-1]
corner_bool = np.invert(np.all(np.equal(diff1[:-1], diff1[1:]), axis=1))
# corner_bool = np.count_nonzero((self._coords[2:, :] - self._coords[:-2, :]), axis=1) == 1
corner_bool[self.ss_sequence[1:-1] == 'H'] = False
if not np.any(corner_bool): return False
corner_idx = np.squeeze(np.argwhere(corner_bool), axis=1) + 1 # +1 as idx was of left neighbor
np.random.shuffle(corner_idx)
# Generate & check candidates
for ci in corner_idx:
candidate = self.corner_flip(self._coords[ci - 1, :3],
self._coords[ci, :3],
self._coords[ci + 1, :3])
if not self.inNd(candidate)[0]:
# if not nhp.inNd(candidate, self.coords)[0]:
self._coords[ci, :] = candidate
return True
return False
def apply_crankshaft_move(self):
# temporarily shutdown: not sure how this contributes for BCC
diff_4pos = self._coords[3:, :] - self._coords[:-3, :] # Diff res with two spaces
crank_bool = np.all(np.absolute(diff_4pos) == 2, axis=1)
# crank_bool = np.sum(np.absolute(diff_4pos), axis=1) == 2 # if diff is 2 for that postion, it must be a u-loop
if not np.any(crank_bool): return False
crank_idx = np.squeeze(np.argwhere(crank_bool), axis=1) # index of left-most position of the four points!
np.random.shuffle(crank_idx)
# Generate & check candidates
for ci in crank_idx:
crank_idx, crank_dir = abs(ci), copysign(1, ci)
c0, c1, c2, c3 = self.coords[ci:ci + 4, :]
c1_candidates = c0 + neighbor_mods
c2_candidates = c3 + neighbor_mods
c1_candidates = c1_candidates[np.invert(self.inNd(c1_candidates)), :]
c2_candidates = c2_candidates[np.invert(self.inNd(c2_candidates)), :]
if not len(c1_candidates) or not len(c2_candidates): continue
np.random.shuffle(c1_candidates)
for c1_candidate in c1_candidates:
c2_idx = nhp.inNd(c2_candidates, c1_candidate + neighbor_mods)
if np.any(c2_idx):
c2_candidates = c2_candidates[c2_idx]
np.random.shuffle(c2_candidates)
self._coords[ci + 1:ci + 3, :] = np.vstack((c1_candidate, c2_candidates[0]))
return True
return False
def set_hash_list(self):
self.hash_list = set([hash(cc.tostring()) for cc in self.coords])
def inNd(self, c):
if c.ndim == 1:
c = np.expand_dims(c, 0)
c_hash_list = [hash(cc.tostring()) for cc in c]
return [ch in self.hash_list for ch in c_hash_list]
@staticmethod
def branch_rotation(c, pivot, dim):
"""
:param c: coordinates to change
:param pivot: point around which to rotate
:param dim: signed dimension in which to perform rotation (1, 2 or 3), pos for fwd, neg for rev
:return: mutated coords
"""
return np.dot(rotmats[dim], (c - pivot).T).T + pivot
@staticmethod
def corner_flip(c1, c2, c3):
return c2 + ((c1 + c3) - 2 * c2)
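# Example: corner_flip returns c1 + c3 - c2, i.e. c2 reflected through the
# midpoint of its neighbors; with np.array inputs,
#   corner_flip(np.zeros(3), np.array([2, 2, 2]), np.array([4, 0, 0]))
#   # -> array([ 2., -2., -2.])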
# --- stats and derived properties ----
def get_pdb_coords(self, intermediate=False, conect_only=False):
"""
Return coordinates in pdb format, as string
:param intermediate: return without CONECT cards, required to create pdbs with multiple models
:param conect_only: only return the CONECT cards
:return:
"""
coords_ca = self.coords - self.coords[0] # translate to 0,0,0
coords_ca = coords_ca * self.lat_dist # unit distances to real distances
cn = (self.coords[1] - self.coords[0]) * -1 * sqrt(self.n1_dist ** 2 / 3) # stick on N1 in opposite direction of chain
cn_str = nhp.pdb_coord(cn)
# resn = nhp.aa_dict[self.aa_sequence[0]]
resn = nhp.aa_dict.get(self.aa_sequence[0], self.aa_sequence[0])
txt = f'HETATM 1 N {resn} A 1 {cn_str} 1.00 1.00 N\n'
# Add CA coordinates
an = 2 # atom number, start at 2 for first N
an_alpha = 1 # tracker of alpha carbon atom number, just for CONECT record
resi = 1
conect = ""
tag_coord_dict = {0: []} # Fill in tag at pos 0, in case no other residues are tagged
for ci in self.tagged_resi:
if ci == 0: continue
tag_coord_dict[ci], tag_coord_dict[0] = self.get_dye_coords(ci, 0)
for ci, ca in enumerate(coords_ca):
# --- add alpha carbon CA ---
# resn_str = nhp.aa_dict[self.aa_sequence[ci]]
resn_str = nhp.aa_dict.get(self.aa_sequence[ci], self.aa_sequence[ci])
resi_str = str(resi).rjust(4)
ca_str = nhp.pdb_coord(ca)
txt += f'HETATM{str(an).rjust(5)} CA {resn_str} A{resi_str} {ca_str} 1.00 1.00 C\n'
conect += f"CONECT{str(an_alpha).rjust(5)}{str(an).rjust(5)}\n"
an_alpha = an
an += 1
if ci in self.tagged_resi: # Add tag residue
if not len(tag_coord_dict[ci]): continue
dye_coord = tag_coord_dict[ci]
tc_str = nhp.pdb_coord(dye_coord[0])
txt += f'HETATM{str(an).rjust(5)} CB {resn_str} A{resi_str} {tc_str} 1.00 1.00 C\n'
conect += f"CONECT{str(an_alpha).rjust(5)}{str(an).rjust(5)}\n"
an += 1
resi += 1
# Add terminus
an_str = str(an).rjust(5)
resn_str = nhp.aa_dict.get(self.aa_sequence[-1], self.aa_sequence[-1])
resi_str = str(resi - 1).rjust(4) # - 1: still on same residue as last CA
txt += f'TER {an_str} {resn_str} A{resi_str}\n'
if conect_only:
return conect
elif intermediate:
return txt
return txt + conect
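# Example (hypothetical): writing a Lattice instance to a PDB file --
#   with open('model.pdb', 'w') as fh:
#       fh.write(lattice.get_pdb_coords())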
def plot_structure(self, fn=None, auto_open=True):
cols = [nhp.aa_to_hp_dict[aa] if resi not in self.tagged_resi else 'purple'
for resi, aa in enumerate(self.aa_sequence)]
trace_bb = go.Scatter3d(x=self.coords[:, 0],
y=self.coords[:, 1],
z=self.coords[:, 2],
line=dict(color=cols, width=20),
# marker=dict(size=5)
)
trace_list = [trace_bb]
pmin = np.min(self.coords)
pmax = np.max(self.coords)
layout = go.Layout(scene=dict(
xaxis=dict(range=[pmin, pmax]),
yaxis=dict(range=[pmin, pmax]),
zaxis=dict(range=[pmin, pmax]),
aspectmode='cube'
)
)
fig = go.Figure(data=trace_list, layout=layout)
if fn is None:
py.offline.plot(fig, auto_open=auto_open)
else:
py.offline.plot(fig, filename=fn, auto_open=auto_open)
def get_neighbors(self, c):
return neighbor_mods + c
# --- setters & properties ---
def get_dye_coords(self, ci, partner_idx, expected_value=None):
tag_obstructions_list = self.get_tag_obstructions(ci)
unobstructed_tag_mods = [tm[0] / np.linalg.norm(tm[0]) for ti, tm in enumerate(tag_mods_single) if tag_obstructions_list[ti] == 0]
if not len(unobstructed_tag_mods): return [], []
ptc = (self.coords[partner_idx] - self.coords[ci])
ptc = ptc / np.linalg.norm(ptc)
tag_ca_dist = np.linalg.norm(self.coords[partner_idx] - self.coords[ci]) * self.lat_dist
# Filter tag positions on angle
angle_limit = 70 if tag_ca_dist <= 20 else 0
angles = [nhp.get_angle(ptc, ut) for ut in unobstructed_tag_mods]
ptc_angles_idx = [it for it, ut in enumerate(unobstructed_tag_mods) if angles[it] > angle_limit]
if not len(ptc_angles_idx):
ptc_angles_idx = [np.argmax(angles)]
# Filter tag positions on dihedral
# dist_best = np.Inf
largest_dh = (-np.Inf, ())
tuple_list = []
tag0_obstructions_list = self.get_tag_obstructions(partner_idx)
unobstructed_tag0_mods = [tm[0] / np.linalg.norm(tm[0]) for ti, tm in enumerate(tag_mods_single) if
tag0_obstructions_list[ti] == 0]
if not len(unobstructed_tag0_mods): return [], []
for ti in ptc_angles_idx:
for t0 in unobstructed_tag0_mods:
dihedral = nhp.get_abs_dihedral(self.coords[ci], self.coords[0],
self.coords[ci] + unobstructed_tag_mods[ti],
self.coords[0] + t0)
if dihedral > angle_limit:
tuple_list.append((unobstructed_tag_mods[ti], t0))
if dihedral > largest_dh[0]:
largest_dh = (dihedral, (unobstructed_tag_mods[ti], t0))
# dist = np.abs(dihedral - angles[ti])
# if dist < dist_best:
# tuple_best = [unobstructed_tag_mods[ti], t0]
# dist_best = dist
# if dist_best > 3: return [], []
if len(tuple_list):
tuple_best = random.choice(tuple_list)
else:
tuple_best = largest_dh[1]
return [(self.coords[ci] - self.coords[0]) * self.lat_dist + tuple_best[0] * self.linker_dist], \
[tuple_best[1] * self.linker_dist]
@property
def dist_fingerprint(self):
if len(self.tagged_resi) < 2: return []
fp = {}
for fi in self.tagged_resi:
if fi == 0: continue
dye_coords, dye_coords_0 = self.get_dye_coords(fi, 0)
if not len(dye_coords): continue
cur_fp = []
for d0 in dye_coords_0:
cur_fp.extend([np.linalg.norm(d0 - dc) for dc in dye_coords])
tt = self.tagged_resi_dict[fi] # tag type
if tt in fp:
fp[tt].append(np.mean(cur_fp))
else:
fp[tt] = [np.mean(cur_fp)]
# fp.append(np.mean(cur_fp))
return fp
@property
def base_energy(self):
return np.sum(self.individual_energies)
@property
def individual_energies(self):
"""
Energy cost function
"""
emat, e_wat, e_dsb, e_tag, e_reg = self.e_matrix
e_aa = emat[:-4, :].sum().sum() / 2
e_ss = emat[-4:-1, :].sum().sum()
return e_aa, e_ss, e_wat, e_dsb, e_tag, e_reg
def beta_sheet_bend_rule(self, c):
# return np.sum(np.abs(c[2] - c[0]) == 4) > 1 # true if angles of 109.5 or 180 deg
# return np.sum(np.abs(c[2] - c[0]) == 4) == 3 # true if angles 180 deg
if len(c) == 2:
return True
return np.sum(np.abs(c[2] - c[0]) == 4) == 2 # true if angles 109.5 deg
def beta_sheet_parallel_rule(self, neighbors, adjacents):
parallel_dist = neighbors - adjacents
inverse_dist = neighbors[::-1] - adjacents
parallel_check = nhp.inNd(np.abs(parallel_dist[0]), quad_neighbor_mods_abs)[0] and len(np.unique(parallel_dist, axis=0)) == 1
inverse_check = nhp.inNd(np.abs(inverse_dist[0]), quad_neighbor_mods_abs)[0] and len(np.unique(inverse_dist, axis=0)) == 1
import numpy as np
import tensorflow as tf
import os
import myPickle
XNAME = 'X.txt'
CMNAME = 'charmap.pickle'
ENCODING = 'ascii'
buffer_size = 10000
# Special tokens
MASK = 3
END = 2
PAD = 0
################
MAX_MASKED = .3
# def mask(x, xl):
# if INCLUDE_END_SYMBOL: xl -= 1
# k = np.random.randint(0, xl)
# y = x[k]
# x[k] = MASK
# return x, [y], [k]
def mask(x_index_copy, x_len, MAX_MASKED, INCLUDE_END_SYMBOL):
if INCLUDE_END_SYMBOL:
x_len -= 1
if MAX_MASKED == -1:
# single missing
masked_index = [np.random.randint(0, x_len)]
import numpy as np
from . import settings
import scipy.ndimage as ndi
import scipy.constants as const
#import matplotlib.pyplot as plt
from . import gaussians as G
def velfield(params, shape, oversample=1):
"""
Make a 2d array containing a velocity field. This Velocity field can be oversampled compared to the KMOS spaxel resolution.
* Take a set of X,Y coordinates. These are _larger_ than the data we're trying to fit- we pad them such that we can shift the velocity map
to the centre at the end.
* Rotate these coordinates by angle PA degrees.
* Make a velocity map in terms of these rotated coordinates, with the centre at data.shape/2! NOT at the required xc, yc coordinates yet.
* Finally, shift using ndi.shift
* Crop away the extra padded values
Args:
params (dict): A dictionary containing the keys 'PA' (position angle
of velocity map), 'xc' and 'yc' (map centres), 'theta' (intrinsic inclination of disk), 'log_r0' and 'log_s0' (disk scale and surface brightness parameters) and 'v0' (velocity offset of central pixel)
shape (tuple): The maximum x and y values of the disk. Note that a
shape of (30, 30) can be oversampled to larger than (30, 30) output
oversample (int, optional): Integer amount to oversample the array by.
Returns:
array: An array containing the velocity map, of shape (shape)*oversample
"""
#This is the 'angular eccentricity'
#Shapes the flattening of the elliptical coordinates
#cos(theta) is just b/a for the ellipse
#sin(theta) is sqrt(1-b**2/a**2), or the eccentricity e
#Should limit a=5b for reasonable galaxies
assert type(oversample)==int, 'Oversample must be an integer'
PA=params['PA']
xc=params['xc']
yc=params['yc']
v0=params['v0']
PA_rad=PA*np.pi/180.
#Get coordinate axes
max_shift=settings.max_centre_shift
ys=np.linspace(0-max_shift, shape[0]+max_shift, oversample*(shape[0]+2*max_shift))
xs=np.linspace(0-max_shift, shape[1]+max_shift, oversample*(shape[1]+2*max_shift))
X_0, Y_0=np.meshgrid(xs, ys)
#Shift things to the centre, rotate them by PA, then shift back
centre_x=shape[0]/2.0
centre_y=shape[1]/2.0
X_r, Y_r=rotate_coordinates(X_0-centre_x, Y_0-centre_y, PA_rad)
X=X_r+centre_x
Y=Y_r+centre_y
#Intrinisc viewing angle of the disk
theta=params['theta']
theta_rad=theta*np.pi/180.
#Get the simple axisymmetric velfield, then scale by (X - centre_of_array)/R
R = np.sqrt((X-centre_x)**2 + ((Y-centre_y)/np.cos(theta_rad))**2)
velfield = v_circ_exp_quick(R, params)*(X-centre_x)/(R*np.sin(theta_rad))
'''###########################################
CS221 Final Project: Deep Q-Learning Implementation
Authors:
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
###########################################'''
import sys, math
import numpy as np
from collections import deque
import random
import copy
import gym
import keras
from keras.models import Sequential
from keras.layers import Dense
############################################################
class QLearningAlgorithm():
def __init__(self, actions, discount, weights, explorationProb=0.2, exploreProbDecay=0.99, explorationProbMin=0.01, batchSize=32):
self.actions = actions
self.discount = discount
self.explorationProb = explorationProb
self.exploreProbDecay = exploreProbDecay
self.explorationProbMin = explorationProbMin
self.weights = weights
self.numIters = 0
self.model = NeuralNetwork(batchSize, weights)
self.cache = deque(maxlen=1000000)
# This algorithm will produce an action given a state.
# Here we use the epsilon-greedy algorithm: with probability
# |explorationProb|, take a random action.
def getAction(self, state):
if np.random.rand() < self.explorationProb:
return random.choice(self.actions)
else:
predScores = self.model.predict(state)[0]
return np.argmax(predScores)
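# (Added note) exploreProbDecay/explorationProbMin are stored above but not
# applied in this class; a typical schedule, assumed to run once per episode:
#   rl.explorationProb = max(rl.explorationProb * rl.exploreProbDecay,
#                            rl.explorationProbMin)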
# We call this function with batches of (s, a, r, s', done) transitions
# sampled from the replay cache, and use them to update the network weights
# toward the Bellman targets computed below.
def incorporateFeedback(self, states, actions, rewards, newStates, dones):
# initialize variable
states = np.squeeze(states)
newStates = np.squeeze(newStates)
X = states
y = self.model.predict(states)
# calculate gradient
targets = rewards + self.discount*(np.amax(self.model.predict(newStates), axis=1))*(1-dones)
ind = np.array([i for i in range(len(states))])
y[[ind], [actions]] = targets
# update weight
self.model.fit(X, y)
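# The targets computed above implement the Q-learning (Bellman) backup:
#   y_i = r_i + discount * max_a' Q(s'_i, a') * (1 - done_i)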
def updateCache(self, state, action, reward, newState, done):
self.cache.append((state, action, reward, newState, done))
# neural network
class NeuralNetwork():
def __init__(self, batchSize = 32, weights=None):
self.model = Sequential()
self.model.add(Dense(100, input_dim=8, activation='relu'))
self.model.add(Dense(100, activation='relu'))
self.model.add(Dense(4, activation='linear'))
adam = keras.optimizers.adam(lr=0.001)
self.model.compile(loss='mse', optimizer=adam)
if isinstance(weights, str):
self.model.load_weights(weights)
def predict(self, state):
return self.model.predict_on_batch(state)
def fit(self, X, y):
self.model.fit(X, y, epochs=1, verbose=0)
def save(self, weights):
self.model.save_weights(weights)
# Perform |numTrials| of the following:
# On each trial, take the gym environment |env| and an RLAlgorithm |rl| and
# simulate the RL algorithm according to the dynamics of the environment.
# Each trial will run for at most 500 steps.
# Return the list of rewards that we get for each trial.
def simulate(env, rl, numTrials=10, train=False, verbose=False,
trialDemoInterval=10, batchSize=32):
totalRewards = [] # The rewards we get on each trial
for trial in range(numTrials):
state = np.reshape(env.reset(), (1,8))
totalReward = 0
iteration = 0
while iteration <= 500:
# while True:
action = rl.getAction(state)
newState, reward, done, info = env.step(action)
newState = np.reshape(newState, (1,8))
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Copyright 2004-2005 by Enthought, Inc.
#
# The code has been adapted by <NAME> to work with GPUs
# using Cocos from the SciPy code available at
# https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
#
# The open source license of the original code is reproduced below:
#
# Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from cached_property import cached_property
import numbers
import typing as tp
import warnings
from cocos.multi_processing.device_pool import ComputeDevicePool
from cocos.multi_processing.single_device_batch_processing import map_combine_single_device
from cocos.multi_processing.utilities import generate_slices_with_number_of_batches
import cocos.numerics as cn
from cocos.numerics.data_types import NumericArray
import cocos.device as cd
from cocos.numerics.numerical_package_selector import select_num_pack
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
# # from . import mvn
# from scipy.stats import mvn
# from ._stats import gaussian_kernel_estimate
#
# from scipy.stats._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
def _split_points_into_batches(points: NumericArray,
number_of_points_per_batch: int) \
-> tp.List[tp.List[NumericArray]]:
number_of_points = points.shape[1]
n_begin = 0
args_list = []
while n_begin < number_of_points:
n_end = min(n_begin + number_of_points_per_batch, number_of_points)
args_list.append([points[:, n_begin:n_end]])
n_begin = n_end
return args_list
def _check_array_at_right_location_and_convert(array,
gpu: bool,
dtype: np.generic = np.float32):
if isinstance(array, np.ndarray) and gpu:
array = cn.array(array)
if isinstance(array, cn.ndarray) and not gpu:
array = np.array(array)
if array.dtype != dtype:
array = array.astype(dtype)
return array
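# e.g. (illustrative): with gpu=True a NumPy array comes back as a float32
# cocos array; with gpu=False a cocos array is copied back to a NumPy array.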
def ensure_consistent_numeric_arrays(arrays: tp.Iterable[tp.Optional[NumericArray]],
gpu: bool,
dtype: np.generic = np.float32):
return tuple(_check_array_at_right_location_and_convert(array=array, gpu=gpu, dtype=dtype)
if array is not None
else None
for array
in arrays)
def _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points: NumericArray,
values: NumericArray,
xi: NumericArray) \
-> tp.Tuple[int, int, int]:
n = points.shape[0]
if points.ndim > 1:
d = points.shape[1]
else:
d = 1
m = xi.shape[0]
if values.ndim > 1:
p = values.shape[1]
else:
p = 1
if p != 1:
raise ValueError('p != 1 is not supported')
if xi.shape[1] != d:
raise ValueError(f"points and xi must have same trailing dim but the shape of xi is {xi.shape}")
return n, m, d
def gaussian_kernel_estimate_vectorized_whitened(whitening: NumericArray,
whitened_points: NumericArray,
values: NumericArray,
xi: NumericArray,
norm: float,
dtype: np.generic,
gpu: bool) -> NumericArray:
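    """
    Evaluate a multivariate Gaussian kernel estimate from pre-whitened data.
    `whitened_points` are the (n, d) data points already multiplied by the
    Cholesky factor `whitening` of the precision matrix; `xi` holds the (m, d)
    evaluation points, which are whitened here; `norm` is the kernel
    normalization constant. (Docstring added for clarity; the wording is not
    from the original source.)
    """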
n, m, d = \
_verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=whitened_points,
values=values,
xi=xi)
whitened_points, values, xi, whitening = \
ensure_consistent_numeric_arrays((whitened_points, values, xi, whitening), gpu)
num_pack = select_num_pack(gpu)
whitened_points = whitened_points.astype(dtype, copy=False)
whitened_xi = num_pack.dot(xi, whitening).astype(dtype, copy=False)
values = values.astype(dtype, copy=False)
# Create the result array and evaluate the weighted sum
whitened_points = whitened_points.reshape((n, 1, d))
whitened_xi = whitened_xi.reshape((1, m, d))
residual = whitened_points - whitened_xi
arg = residual * residual
del residual
if d > 1:
assert arg.shape == (n, m, d)
arg = num_pack.sum(arg, axis=2)
else:
arg = arg.reshape((n, m))
if not gpu:
assert arg.shape == (n, m)
arg = num_pack.exp(- 0.5 * arg) * norm
if not gpu:
assert arg.shape == (n, m)
# estimate = num_pack.dot(arg.T, values)
estimate = (values * arg).sum(axis=0)
if estimate.ndim > 1:
estimate = estimate.squeeze()
if gpu:
cd.sync()
return estimate
def gaussian_kernel_estimate_vectorized(points: NumericArray,
values: NumericArray,
xi: NumericArray,
precision: NumericArray,
dtype: np.generic,
gpu: bool = False) \
-> NumericArray:
"""
def gaussian_kernel_estimate(points, real[:, :] values, xi, precision)
Evaluate a multivariate Gaussian kernel estimate.
Parameters
----------
points : array_like with shape (n, d)
Data points to estimate from in d dimensions.
values : real[:, :] with shape (n, p)
Multivariate values associated with the data points.
xi : array_like with shape (m, d)
Coordinates to evaluate the estimate at in d dimensions.
precision : array_like with shape (d, d)
Precision matrix for the Gaussian kernel.
dtype : the result dtype
gpu : whether to compute the gaussian kernel estimate on the gpu
Returns
-------
estimate : double[:, :] with shape (m, p)
Multivariate Gaussian kernel estimate evaluated at the input coordinates.
"""
num_pack = select_num_pack(gpu)
n, m, d = \
_verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=points,
values=values,
xi=xi)
# n = points.shape[0]
#
# if points.ndim > 1:
# d = points.shape[1]
# else:
# d = 1
# m = xi.shape[0]
#
# if values.ndim > 1:
# p = values.shape[1]
# else:
# p = 1
#
# if p != 1:
# raise ValueError('p != 1 is not supported')
#
# if xi.shape[1] != d:
# raise ValueError("points and xi must have same trailing dim")
# if precision.shape[0] != d or precision.shape[1] != d:
# raise ValueError("precision matrix must match data dims")
points, values, xi, precision = \
ensure_consistent_numeric_arrays((points, values, xi, precision), gpu)
# Debug output left from development:
# print(f'type(points) = {type(points)}')
# print(f'type(values) = {type(values)}')
# print(f'type(xi) = {type(xi)}')
# print(f'type(precision) = {type(precision)}')
# Rescale the data
whitening = num_pack.linalg.cholesky(precision).astype(dtype, copy=False)
points = num_pack.dot(points, whitening).astype(dtype, copy=False)
# xi = num_pack.dot(xi, whitening).astype(dtype, copy=False)
values = values.astype(dtype, copy=False)
# Evaluate the normalisation
norm = (2 * np.pi) ** (- d / 2) * num_pack.prod(num_pack.diag(whitening))
# # Create the result array and evaluate the weighted sum
# points = points.reshape((n, 1, d))
# xi = xi.reshape((1, m, d))
# residual = points - xi
# arg = residual * residual
# del residual
# if d > 1:
# assert arg.shape == (n, m, d)
# arg = num_pack.sum(arg, axis=2)
# else:
# arg = arg.reshape((n, m))
# assert arg.shape == (n, m)
# arg = num_pack.exp(- 0.5 * arg) * norm
# assert arg.shape == (n, m)
#
# estimate = num_pack.dot(arg.T, values)
#
# if gpu:
# cd.sync()
#
# return estimate.squeeze()
return gaussian_kernel_estimate_vectorized_whitened(whitening=whitening,
whitened_points=points,
xi=xi,
values=values,
norm=norm,
dtype=dtype,
gpu=gpu)
def gaussian_kernel_estimate(points, values, xi, precision, dtype):
"""
def gaussian_kernel_estimate(points, real[:, :] values, xi, precision)
Evaluate a multivariate Gaussian kernel estimate.
Parameters
----------
points : array_like with shape (n, d)
Data points to estimate from in d dimensions.
values : real[:, :] with shape (n, p)
Multivariate values associated with the data points.
xi : array_like with shape (m, d)
Coordinates to evaluate the estimate at in d dimensions.
precision : array_like with shape (d, d)
Precision matrix for the Gaussian kernel.
Returns
-------
estimate : double[:, :] with shape (m, p)
Multivariate Gaussian kernel estimate evaluated at the input coordinates.
"""
n = points.shape[0]
d = points.shape[1]
m = xi.shape[0]
p = values.shape[1]
if p != 1:
raise ValueError('p != 1 is not supported')
if xi.shape[1] != d:
raise ValueError("points and xi must have same trailing dim")
if precision.shape[0] != d or precision.shape[1] != d:
raise ValueError("precision matrix must match data dims")
# Rescale the data
whitening = np.linalg.cholesky(precision).astype(dtype, copy=False)
points_ = np.dot(points, whitening).astype(dtype, copy=False)
xi_ = np.dot(xi, whitening).astype(dtype, copy=False)
values_ = values.astype(dtype, copy=False)
# Evaluate the normalisation
norm = (2 * np.pi) ** (- d / 2)
for i in range(d):
norm *= whitening[i, i]
# Create the result array and evaluate the weighted sum
estimate = np.zeros((m, p), dtype)
for i in range(n):
for j in range(m):
arg = 0
for k in range(d):
residual = (points_[i, k] - xi_[j, k])
arg += residual * residual
arg = np.exp(-arg / 2) * norm
for k in range(p):
estimate[j, k] += values_[i, k] * arg
return np.asarray(estimate)
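# Consistency sketch (illustrative): on CPU inputs the looped and vectorized
# estimators should agree, e.g.
#
#   pts = np.random.randn(200, 2)
#   vals = np.full((200, 1), 1.0 / 200)
#   xi = np.random.randn(50, 2)
#   prec = np.linalg.inv(np.cov(pts, rowvar=False))
#   a = gaussian_kernel_estimate(pts, vals, xi, prec, np.float64)
#   b = gaussian_kernel_estimate_vectorized(pts, vals, xi, prec, np.float64, gpu=False)
#   assert np.allclose(a[:, 0], b)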
@dataclass(frozen=True)
class GaussianKDEInformation:
points: np.ndarray # (d, n) shaped array of datapoints
weights: np.ndarray # (n,) shaped array of weights, optional
dimension: int # data dimension
n: int # number of data points
neff: float # effective sample size
CovarianceFactorFunctionType = tp.Callable[[GaussianKDEInformation], float]
SCOTTS_FACTOR_STRING = 'scotts'
SILVERMAN_FACTOR_STRING = 'silverman'
def compute_scotts_factor(kde_info: GaussianKDEInformation) -> float:
return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
def compute_silverman_factor(kde_info: GaussianKDEInformation) -> float:
d = kde_info.dimension
neff = kde_info.neff
return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
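# Numeric example (illustrative): for neff = 1000 equally weighted points in
# d = 2 dimensions, Scott's factor is 1000**(-1/6) ~ 0.316; Silverman's factor
# is (1000 * (2 + 2) / 4)**(-1/6), which coincides here because (d + 2)/4 = 1.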
# class CovarianceFactor(ABC):
# @abstractmethod
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# pass
#
#
# class ScottsFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
#
#
# class SilvermanFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# d = kde_info.dimension
# neff = kde_info.neff
# return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
#
#
# class LambdaCovarianceFactor(CovarianceFactor):
# def __init__(self, covariance_factor_fun: tp.Callable[[GaussianKDEInformation], float]):
# self._covariance_factor_fun = covariance_factor_fun
#
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return self._covariance_factor_fun(kde_info)
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
gpu: whether to evaluate the kernel density estimate on the gpu
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] <NAME>, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] <NAME>, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] <NAME>, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] <NAME> and <NAME>, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] <NAME>., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self,
dataset: NumericArray,
bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
str,
tp.Callable,
numbers.Number]] = None,
weights: tp.Optional[NumericArray] = None,
gpu: bool = False):
self._num_pack = select_num_pack(gpu)
self._gpu = gpu
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
weights = atleast_1d(weights).astype(float)
weights /= np.sum(weights)
if weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1.0/np.sum(weights**2)
else:
weights = ones(self.n) / self.n
if gpu:
dtype = np.float32
weights = weights.astype(dtype)
self.dataset = self.dataset.astype(dtype)
self._weights = weights
self._covariance_factor = \
self._get_covariance_factor_function_from_bandwidth_type(bw_method)
self._compute_covariance()
def _check_and_adjust_dimensions_of_points(self, points: np.ndarray) \
-> np.ndarray:
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
raise ValueError(f"points have dimension {d}, "
f"dataset has dimension {self.d}")
return points
def evaluate(self, points):
"""
Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = self._check_and_adjust_dimensions_of_points(points)
output_dtype = np.common_type(self.covariance, points)
# The whitened, vectorized evaluator is always used; the plain paths are
# kept below for reference.
# result = gaussian_kernel_estimate_vectorized(points=self.dataset.T,
#                                              values=self.weights[:, None],
#                                              xi=points.T,
#                                              precision=self.inv_cov,
#                                              dtype=output_dtype,
#                                              gpu=self._gpu)
# result = gaussian_kernel_estimate(points=self.dataset.T,
#                                   values=self.weights[:, None],
#                                   xi=points.T,
#                                   precision=self.inv_cov,
#                                   dtype=output_dtype)[:, 0]
result = gaussian_kernel_estimate_vectorized_whitened(
    whitening=self.whitening,
    whitened_points=self.whitened_points,
    values=self.weights[:, None],
    xi=points.T,
    norm=self.normalization_constant,
    dtype=output_dtype,
    gpu=self._gpu)
return result
__call__ = evaluate
def evaluate_in_batches(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int) \
-> np.ndarray:
"""
Evaluates a Gaussian KDE in batches and stores the results in main memory.
Args:
points:
numeric array with shape (d, m) containing the points at which to evaluate the kernel
density estimate
maximum_number_of_elements_per_batch:
maximum number of data points times evaluation points to process in a single batch
Returns:
a m-dimensional NumPy array of kernel density estimates
"""
points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
args_list = _split_points_into_batches(points, points_per_batch)
result = \
map_combine_single_device(f=self.evaluate,
combination=lambda x: np.hstack(x),
args_list=args_list)
return result
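# Usage sketch (illustrative): cap each batch at ~1e8 data-point/evaluation-
# point pairs so the intermediate (n, m, d) arrays fit in memory:
#
#   kde = gaussian_kde(dataset, gpu=True)
#   densities = kde.evaluate_in_batches(
#       points, maximum_number_of_elements_per_batch=10**8)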
def evaluate_in_batches_on_multiple_devices(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int,
compute_device_pool: ComputeDevicePool) \
-> np.ndarray:
"""
Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.
Args:
points:
numeric array with shape (d, m) containing the points at which to evaluate the kernel
density estimate
maximum_number_of_elements_per_batch:
maximum number of data points times evaluation points to process in a single batch
Returns:
a m-dimensional NumPy array of kernel density estimates
"""
if self.gpu:
raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')
points = self._check_and_adjust_dimensions_of_points(points)
number_of_points = points.shape[1]
args_list = []
for begin_index, end_index in generate_slices_with_number_of_batches(number_of_points,
compute_device_pool.number_of_devices):
args_list.append([points[:, begin_index:end_index]])
# points_per_device = math.floor(number_of_points / gpu_pool.number_of_devices)
# args_list = _split_points_into_batches(points, points_per_device)
kwargs_list = compute_device_pool.number_of_devices * \
[
{'maximum_number_of_elements_per_batch': maximum_number_of_elements_per_batch,
'n': self.n,
'd': self.d}
]
def f(points_internal,
maximum_number_of_elements_per_batch: int,
n: int,
d: int):
points_per_batch = math.floor(maximum_number_of_elements_per_batch / (n * d))
args_list_internal = _split_points_into_batches(points_internal, points_per_batch)
def f_internal(points_internal_internal):
return gaussian_kernel_estimate_vectorized_whitened(
whitening=self.whitening,
whitened_points=self.whitened_points,
values=self.weights[:, None],
xi=points_internal_internal.T,
norm=self.normalization_constant,
dtype=np.float32,
gpu=True)
result = \
map_combine_single_device(f=f_internal,
combination=lambda x: np.hstack(x),
args_list=args_list_internal)
return result
result = \
compute_device_pool.map_combine(f=f,
combination=lambda x: np.hstack(x),
args_list=args_list,
kwargs_list=kwargs_list)
return result
# def evaluate_in_batches_on_multiple_gpus(self,
# points: NumericArray,
# maximum_number_of_elements_per_batch: int,
# gpu_pool: ComputeDevicePool) \
# -> np.ndarray:
# """
# Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.
#
# Args:
# points:
# numeric array with shape (d, m) containing the points at which to evaluate the kernel
# density estimate
# maximum_number_of_elements_per_batch:
# maximum number of data points times evaluation points to process in a single batch
#
# Returns:
# a m-dimensional NumPy array of kernel density estimates
# """
# if self.gpu:
# raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')
#
# points = self._check_and_adjust_dimensions_of_points(points)
#
# # number_of_points = points.shape[1]
# points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
#
# args_list = _split_points_into_batches(points, points_per_batch)
#
# def f(x):
# result = gaussian_kernel_estimate_vectorized_whitened(
# whitening=self.whitening,
# whitened_points=self.whitened_points,
# values=self.weights[:, None],
# xi=x.T,
# norm=self.normalization_constant,
# dtype=np.float32,
# gpu=True)
#
# return result
#
# result = \
# gpu_pool.map_combine(f=f,
# combination=lambda x: np.hstack(x),
# args_list=args_list)
#
# return result
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not symmetric positive definite
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
# def integrate_box(self, low_bounds, high_bounds, maxpts=None):
# """Computes the integral of a pdf over a rectangular interval.
#
# Parameters
# ----------
# low_bounds : array_like
# A 1-D array containing the lower bounds of integration.
# high_bounds : array_like
# A 1-D array containing the upper bounds of integration.
# maxpts : int, optional
# The maximum number of points to use for integration.
#
# Returns
# -------
# value : scalar
# The result of the integral.
#
# """
# if maxpts is not None:
# extra_kwds = {'maxpts': maxpts}
# else:
# extra_kwds = {}
#
# value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
# self.dataset, self.weights,
# self.covariance, **extra_kwds)
# if inform:
# msg = ('An integral in mvn.mvnun requires more points than %s' %
# (self.d * 1000))
# warnings.warn(msg)
#
# return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
Specify `seed` for reproducible drawing of random variates.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
@staticmethod
def _get_covariance_factor_function_from_bandwidth_type(
bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
str,
tp.Callable,
numbers.Number]] = None) \
-> CovarianceFactorFunctionType:
"""
Infers the covariance factor function from the given bandwidth selection method.
Args:
bw_method: either 'scotts' or 'silverman' or a scalar or a function returning a float
Returns:
covariance factor function
"""
if bw_method is None:
return compute_scotts_factor
elif isinstance(bw_method, str):
if bw_method == SCOTTS_FACTOR_STRING:
return compute_scotts_factor
elif bw_method == SILVERMAN_FACTOR_STRING:
return compute_silverman_factor
else:
raise ValueError(f'bw_method={bw_method} is not supported')
elif callable(bw_method):
return bw_method
elif np.isscalar(bw_method):
return lambda kde_info: bw_method
else:
raise ValueError(f'bw_method {bw_method} is not supported')
def _compute_covariance(self):
"""
Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
kde_info = GaussianKDEInformation(dimension=self.d,
n=self.n,
neff=self.neff,
points=self.dataset,
weights=self.weights)
self.factor = self._covariance_factor(kde_info)
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = \
atleast_2d(cov(self.dataset,
rowvar=True,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
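# NOTE: the methods above reference self.weights, self.neff, self.gpu,
# self.whitening, self.whitened_points and self.normalization_constant, whose
# definitions are missing from this excerpt. The sketch below is a minimal
# reconstruction consistent with how they are used here; it is an assumption,
# not the original cocos source.
@property
def weights(self) -> np.ndarray:
    return self._weights

@property
def gpu(self) -> bool:
    return self._gpu

@property
def neff(self) -> float:
    return 1.0 / np.sum(self._weights ** 2)

@cached_property
def whitening(self) -> NumericArray:
    # Cholesky factor of the scaled precision matrix, as in the evaluators above
    inv_cov = cn.array(self.inv_cov) if self._gpu else self.inv_cov
    return self._num_pack.linalg.cholesky(inv_cov)

@cached_property
def whitened_points(self) -> NumericArray:
    dataset = cn.array(self.dataset.T) if self._gpu else self.dataset.T
    return self._num_pack.dot(dataset, self.whitening)

@cached_property
def normalization_constant(self) -> float:
    # (2 pi)^(-d/2) * prod(diag(whitening)), mirroring the kernel evaluators
    return float((2 * np.pi) ** (-self.d / 2) *
                 self._num_pack.prod(self._num_pack.diag(self.whitening)))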
def pdf(self, x: np.ndarray) -> NumericArray:
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x: np.ndarray) -> np.ndarray:
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = zeros((self.n, m), dtype=float)
import numpy as np
from math import atan2, floor, pi
def hashkey(gy, gx, Qangle, W, precision, strSplitter=None, coheSplitter=None):
# Transform 2D matrix into 1D array
gx = gx.ravel()
gy = gy.ravel()
# Eigen-analysis of the 2x2 gradient structure tensor G^T W G
# (equivalent to the SVD of sqrt(W) G)
G = np.vstack((gx,gy)).T
GTWG = G.T.dot(W).dot(G).astype(precision)
ma = GTWG[0,0]
mb = GTWG[0,1]
mc = GTWG[1,0]
md = GTWG[1,1]
# Eigenvalues from the trace T and determinant D: L1,2 = T/2 +- sqrt(T^2/4 - D)
T = precision(ma + md)
D = precision(ma * md - mb * mc)
SQ = precision((T * T)/4 - D)
if SQ < 0:
if not np.isclose(SQ, 0, atol=1e-04):
print('SQ={}'.format(SQ))
SQ = 0
L1 = precision(T/2 + np.sqrt(SQ))
L2 = precision(T/2 - np.sqrt(SQ))
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.interpolate
import scipy.integrate
import scipy.optimize
import scipy.spatial.distance
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
MASK = -9999
pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
"""Initialize """
setup = {}
if progenitor['coords']=='galactocentric':
setup['x0'] = x0
setup['v0'] = v0
elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
if progenitor['pm_polar']:
a = v0[1].value
phi = v0[2].value
v0[1] = a*np.sin(phi)*u.mas/u.yr
v0[2] = a*np.cos(phi)*u.mas/u.yr
# convert positions
xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
xgal = xeq.transform_to(coord.Galactocentric)
setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
# convert velocities
setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
#setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
else:
raise ValueError('Observer position needed!')
setup['dr'] = dr
setup['dv'] = dv
setup['minit'] = minit
setup['mfinal'] = mfinal
setup['rcl'] = rcl
setup['dt'] = dt
setup['age'] = age
setup['nstars'] = nstars
setup['integrator'] = integrator
setup['potential'] = potential
setup['pparams'] = pparams
self.setup = setup
self.setup_aux = {}
self.fill_intid()
self.fill_potid()
self.st_params = self.format_input()
def fill_intid(self):
"""Assign integrator ID for a given integrator choice
Assumes setup dictionary has an 'integrator' key"""
if self.setup['integrator']=='lf':
self.setup_aux['iaux'] = 0
elif self.setup['integrator']=='rk':
self.setup_aux['iaux'] = 1
def fill_potid(self):
"""Assign potential ID for a given potential choice
Assumes the setup dictionary has a 'potential' key"""
if self.setup['potential']=='nfw':
self.setup_aux['paux'] = 3
elif self.setup['potential']=='log':
self.setup_aux['paux'] = 2
elif self.setup['potential']=='point':
self.setup_aux['paux'] = 0
elif self.setup['potential']=='gal':
self.setup_aux['paux'] = 4
elif self.setup['potential']=='lmc':
self.setup_aux['paux'] = 6
elif self.setup['potential']=='dipole':
self.setup_aux['paux'] = 8
elif self.setup['potential']=='quad':
self.setup_aux['paux'] = 9
elif self.setup['potential']=='octu':
self.setup_aux['paux'] = 10
def format_input(self):
"""Format input parameters for streakline.stream"""
p = [None]*12
# progenitor position
p[0] = self.setup['x0'].si.value
p[1] = self.setup['v0'].si.value
# potential parameters
p[2] = [x.si.value for x in self.setup['pparams']]
# stream smoothing offsets
p[3] = [self.setup['dr'], self.setup['dv'].si.value]
# potential and integrator choice
p[4] = self.setup_aux['paux']
p[5] = self.setup_aux['iaux']
# number of steps and stream stars
p[6] = int(self.setup['age']/self.setup['dt'])
p[7] = int(p[6]/self.setup['nstars'])
# cluster properties
p[8] = self.setup['minit'].si.value
p[9] = self.setup['mfinal'].si.value
p[10] = self.setup['rcl'].si.value
# time step
p[11] = self.setup['dt'].si.value
return p
def generate(self):
"""Create streakline model for a stream of set parameters"""
#xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
stream = streakline.stream(*self.st_params)
self.leading = {}
self.leading['x'] = stream[:3]*u.m
self.leading['v'] = stream[6:9]*u.m/u.s
self.trailing = {}
self.trailing['x'] = stream[3:6]*u.m
self.trailing['v'] = stream[9:12]*u.m/u.s
def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
"""Observe the stream
stream.obs holds all observations
stream.err holds all errors"""
x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
if mode=='cartesian':
# returns coordinates in following order
# x(x, y, z), v(vx, vy, vz)
if len(units)<2:
units.append(self.trailing['x'].unit)
units.append(self.trailing['v'].unit)
if len(errors)<2:
errors.append(0.2*u.kpc)
errors.append(2*u.km/u.s)
# positions
x = x.to(units[0])
ex = np.ones(np.shape(x))*errors[0]
ex = ex.to(units[0])
# velocities
v = v.to(units[1])
ev = np.ones(np.shape(v))*errors[1]
ev = ev.to(units[1])
self.obs = np.concatenate([x,v]).value
self.err = np.concatenate([ex,ev]).value
elif mode=='equatorial':
# assumes coordinates in the following order:
# ra, dec, distance, vrad, mualpha, mudelta
if len(units)!=6:
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
if len(errors)!=6:
errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
# define reference frame
xgal = coord.Galactocentric(x, **observer)
#frame = coord.Galactocentric(**observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, v, **vobs)
# store coordinates
ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
obs = np.hstack([ra, dec, dist, vr, mua, mud]).value
obs = np.reshape(obs,(6,-1))
if footprint=='sdss':
infoot = dec > -2.5*u.deg
obs = obs[:,infoot]
if rotmatrix is not None and not np.allclose(rotmatrix, np.eye(3)):
xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
obs[0] = xi
obs[1] = eta
self.obs = obs
# store errors
err = np.ones(np.shape(self.obs))
if logerr:
for i in range(6):
err[i] *= np.exp(errors[i].to(units[i]).value)
else:
for i in range(6):
err[i] *= errors[i].to(units[i]).value
self.err = err
self.obsunit = units
self.obserror = errors
# randomly select nstars from the stream
if nstars>-1:
if sequential:
select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
else:
select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars)
self.obs = self.obs[:,select]
self.err = self.err[:,select]
# include only designated dimensions
if len(present)>0:
self.obs = self.obs[present]
self.err = self.err[present]
self.obsunit = [ self.obsunit[x] for x in present ]
self.obserror = [ self.obserror[x] for x in present ]
def prog_orbit(self):
"""Generate progenitor orbital history"""
orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
self.orbit = {}
self.orbit['x'] = orbit[:3]*u.m
self.orbit['v'] = orbit[3:]*u.m/u.s
def project(self, name, N=1000, nbatch=-1):
"""Project the stream from observed to native coordinates"""
poly = np.loadtxt("../data/{0:s}_all.txt".format(name))
self.streak = np.poly1d(poly)
self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N)
self.streak_y = np.polyval(self.streak, self.streak_x)
self.streak_b = np.zeros(N)
self.streak_l = np.zeros(N)
pdot = np.polyder(poly)
for i in range(N):
length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
self.streak_l[i] = length[0]
XB = np.transpose(np.vstack([self.streak_x, self.streak_y]))
n = np.shape(self.obs)[1]
if nbatch<0:
nstep = 0
nbatch = -1
else:
nstep = int(n / nbatch)
i1 = 0
i2 = nbatch
for i in range(nstep):
XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])]))
self.emdist(XA, XB, i1=i1, i2=i2)
i1 += nbatch
i2 += nbatch
XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])]))
self.emdist(XA, XB, i1=i1, i2=n)
#self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header')
def emdist(self, XA, XB, i1=0, i2=-1):
"""Find for each point in XA the closest point on the track XB; store the
signed impact parameter in catalog['b'] and the along-track length in catalog['l']"""
distances = scipy.spatial.distance.cdist(XA, XB)
self.catalog['b'][i1:i2] = np.min(distances, axis=1)
imin = np.argmin(distances, axis=1)
self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1
self.catalog['l'][i1:i2] = self.streak_l[imin]
def _delta_path(self, x, pdot):
"""Return integrand for calculating length of a path along a polynomial"""
return np.sqrt(1 + np.polyval(pdot, x)**2)
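# e.g. (illustrative): for a linear track y = a*x + b the integrand is the
# constant sqrt(1 + a**2), so streak_l reduces to the straight-line length.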
def plot(self, mode='native', fig=None, color='k', **kwargs):
"""Plot stream"""
# Plotting
if fig is None:
plt.close()
plt.figure()
ax = plt.axes([0.12,0.1,0.8,0.8])
if mode=='native':
# Color setup
cindices = np.arange(self.setup['nstars']) # colors of stream particles
nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization
plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3)
plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.xlabel("X (kpc)")
plt.ylabel("Z (kpc)")
elif mode=='observed':
plt.subplot(221)
plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Dec")
plt.subplot(223)
plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Distance")
plt.subplot(222)
plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\\alpha$")
plt.subplot(224)
plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\delta$")
plt.tight_layout()
#plt.minorticks_on()
def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}):
"""Read stream star positions from a file"""
t = np.loadtxt(fname).T
n = np.shape(t)[1]
ns = int((n-1)/2)
self.setup['nstars'] = ns
# progenitor
self.setup['x0'] = t[:3,0] * units['x']
self.setup['v0'] = t[3:,0] * units['v']
# leading tail
self.leading = {}
self.leading['x'] = t[:3,1:ns+1] * units['x']
self.leading['v'] = t[3:,1:ns+1] * units['v']
# trailing tail
self.trailing = {}
self.trailing['x'] = t[:3,ns+1:] * units['x']
self.trailing['v'] = t[3:,ns+1:] * units['v']
def save(self, fname):
"""Save stream star positions to a file"""
# define table
t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
# add progenitor info
t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value]))
# add leading tail infoobsmode
tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# add trailing tail info
tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# save to file
t.write(fname, format='ascii.commented_header')
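# Usage sketch (illustrative; `x0`, `v0` and `halo_pparams` are placeholders,
# and `pparams` must match the parameter list the chosen streakline potential
# expects):
#
#   stream = Stream(x0=x0*u.kpc, v0=v0*u.km/u.s, potential='nfw',
#                   pparams=halo_pparams, age=2*u.Gyr, nstars=400)
#   stream.generate()
#   stream.observe(mode='equatorial', observer=mw_observer, vobs=vsun,
#                  rotmatrix=np.eye(3))
#   stream.save('stream_model.txt')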
# make a streakline model of a stream
def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for i in range(3):
mock['x0'][i] += pparams0[26+i]
mock['v0'][i] += pparams0[29+i]
# vary potential parameters
potential = 'octu'
pparams = pparams0[:26]
#print(pparams[0])
pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
#pparams[0] = pparams0[0]*1e15
#pparams[2] = pparams0[2]*1e15
#print(pparams[0])
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
################################
# Plot observed stream and model
if graph:
observed = load_stream(name)
Ndim = np.shape(observed.obs)[0]
modcol = 'k'
obscol = 'orange'
ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)']
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(12,4))
for i in range(3):
plt.sca(ax[i])
plt.gca().invert_xaxis()
plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream')
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model')
if i==0:
plt.legend(frameon=False, handlelength=0.5, fontsize='small')
plt.tight_layout()
if graphsave:
plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150)
return stream
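# e.g. (illustrative): stream = stream_model(name='gd1', graph=True)
# requires ../data/mock_gd1.params produced by the mock setup.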
def progenitor_params(n):
"""Return progenitor parameters for a given stream"""
if n==-1:
age = 1.6*u.Gyr
mi = 1e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = gd1_coordinates(observer=mw_observer)
elif n==-2:
age = 2.7*u.Gyr
mi = 1e5*u.Msun
mf = 2e4*u.Msun
x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0)
elif n==-3:
age = 3.5*u.Gyr
mi = 5e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = tri_coordinates(observer=mw_observer)
elif n==-4:
age = 2*u.Gyr
mi = 2e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = atlas_coordinates(observer=mw_observer)
out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf}
return out
def gal2eq(x, v, observer=mw_observer, vobs=vsun0):
"""Convert Galactocentric cartesian position and velocity to equatorial observables"""
# define reference frame
xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs)
# store coordinates
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
return(xobs, vobs)
def gd1_coordinates(observer=mw_observer):
"""Approximate GD-1 progenitor coordinates"""
x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-90, -250, -120]
return (x0, v0)
def pal5_coordinates(observer=mw_observer, vobs=vsun0):
"""Pal5 coordinates"""
# sdss
ra = 229.0128*u.deg
dec = -0.1082*u.deg
# bob's rrlyrae
d = 21.7*u.kpc
# harris
#d = 23.2*u.kpc
# odenkirchen 2002
vr = -58.7*u.km/u.s
# fritz & kallivayalil 2015
mua = -2.296*u.mas/u.yr
mud = -2.257*u.mas/u.yr
d = 24*u.kpc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer)
x0 = x.galactocentric
v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s)
return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist())
def tri_coordinates(observer=mw_observer):
"""Approximate Triangulum progenitor coordinates"""
x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-40, 155, 155]
return (x0, v0)
def atlas_coordinates(observer=mw_observer):
"""Approximate ATLAS progenitor coordinates"""
x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [40, 150, -120]
return (x0, v0)
# great circle orientation
def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True):
"""Save rotation matrix for a stream model"""
if stream is None:
stream = stream_model(name, pparams0=pparams, dt=dt)
# find the pole
ra = np.radians(stream.obs[0])
dec = np.radians(stream.obs[1])
rx = np.cos(ra) * np.cos(dec)
ry = np.sin(ra) * np.cos(dec)
rz = np.sin(dec)
r = np.column_stack((rx, ry, rz))
# fit the plane
x0 = np.array([0, 1, 0])
lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,))
x0 = lsq.x/np.linalg.norm(lsq.x)
ra0 = np.arctan2(x0[1], x0[0])
dec0 = np.arcsin(x0[2])
ra0 += np.pi
dec0 = np.pi/2 - dec0
# euler rotations
R0 = myutils.rotmatrix(np.degrees(-ra0), 2)
R1 = myutils.rotmatrix(np.degrees(dec0), 1)
R2 = myutils.rotmatrix(0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
# put xi = 50 at the beginning of the stream
xi[xi>180] -= 360
xi += 360
xi0 = np.min(xi) - 50
R2 = myutils.rotmatrix(-xi0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
if save:
np.save('../data/rotmatrix_{}'.format(name), R)
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
mock['rotmatrix'] = R
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
if graph:
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.plot(stream.obs[0], stream.obs[1], 'ko')
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
plt.sca(ax[1])
plt.plot(xi, eta, 'ko')
plt.xlabel('$\\xi$ (deg)')
plt.ylabel('$\\eta$ (deg)')
plt.ylim(-5, 5)
plt.tight_layout()
plt.savefig('../plots/gc_orientation_{}.png'.format(name))
return R
def wfit_plane(x, r, p=None):
"""Fit a plane to a set of 3d points"""
Np = np.shape(r)[0]
if p is None:
p = np.ones(Np)
Q = np.zeros((3,3))
for i in range(Np):
Q += p[i]**2 * np.outer(r[i], r[i])
x = x/np.linalg.norm(x)
lsq = np.inner(x, np.inner(Q, x))
return lsq
# observed streams
#def load_stream(n):
#"""Load stream observations"""
#if n==-1:
#observed = load_gd1(present=[0,1,2,3])
#elif n==-2:
#observed = load_pal5(present=[0,1,2,3])
#elif n==-3:
#observed = load_tri(present=[0,1,2,3])
#elif n==-4:
#observed = load_atlas(present=[0,1,2,3])
#return observed
def endpoints(name):
"""Find stream endpoints in R.A. and rotated stream coordinates, and store them in the mock parameter file"""
stream = load_stream(name)
# find endpoints
amin = np.argmin(stream.obs[0])
amax = np.argmax(stream.obs[0])
ra = np.array([stream.obs[0][i] for i in [amin, amax]])
dec = np.array([stream.obs[1][i] for i in [amin, amax]])
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
# rotate endpoints
R = mock['rotmatrix']
xi, eta = myutils.rotate_angles(ra, dec, R)
#xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
mock['ra_range'] = ra
mock['xi_range'] = xi #np.percentile(xi, [10,90])
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
def load_pal5(present, nobs=50, potential='gal'):
"""Load Pal 5 observations"""
if len(present)==2:
t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
dist = 21.7
deltadist = 0.7
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==3:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_gd1(present, nobs=50, potential='gal'):
"""Load GD-1 observations"""
if len(present)==3:
t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
dist = 0
deltadist = 0.5
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
d += t['l']*0.04836 + 9.86
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs#[np.array(present)]
observed.obsunit = obsunit
observed.err = err#[np.array(present)]
observed.obserror = obserr
return observed
def load_tri(present, nobs=50, potential='gal'):
"""Load Triangulum stream observations"""
if len(present)==4:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
if len(present)==3:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_atlas(present, nobs=50, potential='gal'):
"""Load ATLAS stream observations along the track from atlas_track"""
ra, dec = atlas_track()
n = np.size(ra)
d = np.random.randn(n)*2 + 20
obs = np.array([ra, dec, d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def atlas_track():
"""Return (ra, dec) of the ATLAS stream great-circle track"""
ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90)
# euler rotations
D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]])
C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]])
B = np.diag(np.ones(3))
R = np.dot(B, np.dot(C, D))
Rinv = np.linalg.inv(R)
l0 = np.linspace(0, 2*np.pi, 500)
b0 = np.zeros(500)
xeq, yeq, zeq = myutils.eq2car(l0, b0)
eq = np.column_stack((xeq, yeq, zeq))
eq_rot = np.zeros(np.shape(eq))
for i in range(np.size(l0)):
eq_rot[i] = np.dot(Rinv, eq[i])
l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot)
ind_s = (ra_s>17) & (ra_s<30)
ra_s = ra_s[ind_s]
dec_s = dec_s[ind_s]
return (ra_s, dec_s)
def fancy_name(n):
"""Return nicely formatted stream name"""
names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'}
return names[n]
# model parameters
def get_varied_pars(vary):
"""Return indices and steps for a preset of varied parameters, and a label for varied parameters
Parameters:
vary - string setting the parameter combination to be varied, options: 'potential', 'progenitor', 'halo', or a list thereof"""
if type(vary) is not list:
vary = [vary]
Nt = len(vary)
vlabel = '_'.join(vary)
pid = []
dp = []
for v in vary:
o1, o2 = get_varied_bytype(v)
pid += o1
dp += o2
return (pid, dp, vlabel)
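# example (illustrative): get_varied_pars(['progenitor', 'halo']) returns
# pid = [26, 27, 28, 29, 30, 31, 5, 6, 8, 10], the concatenated steps dp,
# and vlabel = 'progenitor_halo'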
def get_varied_bytype(vary):
"""Get varied parameter of a particular type"""
if vary=='potential':
pid = [5,6,8,10,11]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
    elif vary=='bary':
        pid = [0,1,2,3,4]
        # superseded step sizes kept for reference:
        # gd1
        #dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
        ## atlas & triangulum
        #dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
        # pal5
        #dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
        #dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
        dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
elif vary=='halo':
pid = [5,6,8,10]
        #dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
        dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
elif vary=='progenitor':
pid = [26,27,28,29,30,31]
dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
elif vary=='dipole':
pid = [11,12,13]
#dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
elif vary=='quad':
pid = [14,15,16,17,18]
dp = [0.5*u.Gyr**-2 for x in range(5)]
elif vary=='octu':
pid = [19,20,21,22,23,24,25]
dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
else:
pid = []
dp = []
return (pid, dp)
def get_parlabel(pid):
"""Return label for a list of parameter ids
Parameter:
pid - list of parameter ids"""
master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
if type(pid) is list:
labels = []
units = []
for i in pid:
labels += [master[i]]
units += [master_units[i]]
else:
labels = master[pid]
units = master_units[pid]
return (labels, units)
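# example (illustrative): get_parlabel([5, 6]) -> (['$V_h$', '$R_h$'], ['km/s', 'kpc'])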
def get_steps(Nstep=50, log=False):
"""Return deltax steps in both directions
    Parameters:
Nstep - number of steps in one direction (default: 50)
log - if True, steps are logarithmically spaced (default: False)"""
if log:
step = np.logspace(-10, 1, Nstep)
else:
step = np.linspace(0.1, 10, Nstep)
step = np.concatenate([-step[::-1], step])
return (Nstep, step)
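# example (illustrative): get_steps(Nstep=2) returns
# (2, array([-10. , -0.1, 0.1, 10. ])), i.e. steps symmetric about zero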
def lmc_position():
""""""
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
print(xgal)
def lmc_properties():
""""""
# penarrubia 2016
mass = 2.5e11*u.Msun
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d)
cgal1 = c1.transform_to(coord.Galactocentric)
xgal = np.array([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc
return (mass, xgal)
# fit bspline to a stream model
def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit bspline to a stream model and save to file"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
fidsort = np.argsort(stream_fid.obs[0])
ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs)
tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
llabel = 'b-spline fit'
else:
llabel = ''
plt.close()
fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim-1):
plt.sca(ax[0][i])
plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
if fiducial:
fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
plt.ylabel(ylabel[i+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[i][0], ylims[i][1])
plt.sca(ax[1][i])
if fiducial:
yref = fits_fid(stream.obs[0])
ycolor = 'b'
else:
yref = fits[i](stream.obs[0])
ycolor = 'r'
plt.axhline(0, color=ycolor, lw=2)
if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
if fiducial:
fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
plt.xlabel(ylabel[0])
plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0]))
if fiducial:
plt.sca(ax[0][Ndim-2])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit each tail individually"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
plt.close()
fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim):
plt.sca(ax[0][i])
Nhalf = int(0.5*np.size(stream.obs[i]))
plt.plot(stream.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:], 'o')
if fiducial:
plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.ylabel(ylabel[i])
plt.sca(ax[1][i])
if fiducial:
plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
if fiducial:
plt.sca(ax[0][Ndim-1])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
else:
return fig
def get_stream_limits(n, align=False):
"""Return lists with limiting values in different dimensions"""
if n==-1:
xlims = [260, 100]
ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
elif n==-2:
xlims = [250, 210]
ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
elif n==-3:
xlims = [27, 17]
ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
elif n==-4:
xlims = [35, 10]
ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
if align:
ylims[0] = [-5, 5]
xup = [110, 110, 80, 80]
xlims = [xup[np.abs(n)-1], 40]
return (xlims, ylims)
# step sizes for derivatives
def iterate_steps(n):
"""Calculate derivatives for different parameter classes, and plot"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
step_convergence(n, Nstep=10, vary=vary)
choose_step(n, Nstep=10, vary=vary)
def iterate_plotsteps(n):
"""Plot stream models for a variety of model parameters"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
pid, dp, vlabel = get_varied_pars(vary)
for p in range(len(pid)):
plot_steps(n, p=p, Nstep=5, vary=vary, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
"""Plot stream for different values of a potential parameter"""
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
pparams0 = pparams_fid
pid, dp, vlabel = get_varied_pars(vary)
plabel, punit = get_parlabel(pid[p])
Nstep, step = get_steps(Nstep=Nstep, log=log)
plt.close()
fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
# fiducial model
stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
Nobs = 10
k = 3
isort = np.argsort(stream0.obs[0])
ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs)
t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
fits = [None]*5
for j in range(5):
fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
color = mpl.cm.RdBu(i/(2*Nstep-1))
#print(i, dp[p], pparams)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
plt.sca(ax[0][j])
plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
plt.sca(ax[1][j])
plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[2][j])
plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[3][j])
plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
# symmetric derivatives
ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100)
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx = -dy / np.abs(2*step[i]*dp[p])
plt.sca(ax[4][j])
plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
# labels, limits
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
for j in range(5):
plt.sca(ax[0][j])
plt.ylabel(ylabel[j+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[j][0], ylims[j][1])
plt.sca(ax[1][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[2][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[3][j])
plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel))
plt.sca(ax[4][j])
plt.xlabel(ylabel[0])
plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel))
#plt.suptitle('Varying {}'.format(plabel), fontsize='small')
plt.tight_layout()
plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))
def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50):
"""Check deviations in numerical derivatives for consecutive step sizes"""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# fiducial model
pparams0 = pparams_fid
stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
if np.any(~np.isfinite(ra_der)):
ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
Nra = np.size(ra_der)
# parameters to vary
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
dpvec = np.array([x.value for x in dp])
Nstep, step = get_steps(Nstep=Nstep, log=log)
dydx_all = np.empty((Np, Nstep, 5, Nra))
dev_der = np.empty((Np, Nstep-2*layer))
step_der = np.empty((Np, Nstep-2*layer))
for p in range(Np):
plabel = get_parlabel(pid[p])
if verbose: print(p, plabel)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
if verbose: print(i, s)
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
# symmetric derivatives
dydx = np.empty((Nstep, 5, Nra))
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx[i][j] = -dy / np.abs(2*step[i]*dp[p])
dydx_all[p] = dydx
# deviations from adjacent steps
step_der[p] = -step[layer:Nstep-layer] * dp[p]
for i in range(layer, Nstep-layer):
dev_der[p][i-layer] = 0
for j in range(5):
for l in range(layer):
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2)
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2)
np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:]))
if graph:
plt.close()
fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
for p in range(Np):
plt.sca(ax[p])
plt.plot(step_der[p], dev_der[p], 'ko')
#plabel = get_parlabel(pid[p])
#plt.xlabel('$\Delta$ {}'.format(plabel))
plt.ylabel('D')
plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
plabels, units = get_parlabel(pid)
punits = ['({})'.format(x) if len(x) else '' for x in units]
t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer))
dev = t['dev']
step = t['step']
dydx = t['ders']
steps_all = t['steps_all'][:,::-1]
Nra = np.shape(dydx)[-1]
best = np.empty(Np)
# plot setup
da = 4
nrow = 2
ncol = Np
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
for p in range(Np):
# choose step
dmin = np.min(dev[p])
dtol = tolerance * dmin
opt_step = np.min(step[p][dev[p]<dtol])
opt_id = step[p]==opt_step
best[p] = opt_step
## largest step w deviation smaller than 1e-4
#opt_step = np.max(step[p][dev[p]<1e-4])
#opt_id = step[p]==opt_step
#best[p] = opt_step
plt.sca(ax[0][p])
for i in range(5):
for j in range(10):
plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.ylim(-1,1)
plt.ylabel('Derivative')
plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small')
plt.sca(ax[1][p])
plt.plot(step[p], dev[p], 'ko')
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
plt.axhline(dtol, ls='-', color='orange', lw=1)
y0, y1 = plt.gca().get_ylim()
plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
plt.ylabel('Derivative deviation')
np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
plt.tight_layout(h_pad=0)
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def read_optimal_step(name, vary, equal=False):
"""Return optimal steps for a range of parameter types"""
if type(vary) is not list:
vary = [vary]
dp = np.empty(0)
for v in vary:
dp_opt = np.load('../data/optimal_step_{}_{}.npy'.format(name, v))
dp = np.concatenate([dp, dp_opt])
if equal:
dp = np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
iexsort = np.argsort(fiducial.obs[0])
raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs)
tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
nrow = 2
ncol = np.int64((Np+1)/nrow)
da = 4
c = ['b', 'b', 'b', 'r', 'r', 'r']
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False)
for p in range(Np):
plt.sca(ax[p%2][int(p/2)])
for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
#print(get_parlabel(p))
plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
plt.tight_layout()
plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes
def define_obsmodes():
"""Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes"""
obsmodes = {}
obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]}
obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]}
pickle.dump(obsmodes, open('../data/observing_modes.info','wb'))
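# example (illustrative) of reading the stored observing modes:
# obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
# obsmodes['gaia']['sig_obs'] -> array([0.1, 0.2, 10, 0.2, 0.2])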
def obsmode_name(mode):
"""Return full name of the observing mode"""
if type(mode) is not list:
mode = [mode]
full_names = {'fiducial': 'Fiducial',
'binospec': 'Binospec',
'hectochelle': 'Hectochelle',
'desi': 'DESI-like',
'gaia': 'Gaia-like',
'exgal': 'Extragalactic'}
keys = full_names.keys()
names = []
for m in mode:
if m in keys:
name = full_names[m]
else:
name = m
names += [name]
return names
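# example (illustrative): obsmode_name(['desi', 'custom']) -> ['DESI-like', 'custom']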
# crbs using bspline
def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = np.sort(mock['xi_range'])
else:
rotmatrix = np.eye(3)
xmm = np.sort(mock['ra_range'])
# typical uncertainties and data availability
obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
if errmode not in obsmodes.keys():
errmode = 'fiducial'
sig_obs = obsmodes[errmode]['sig_obs']
data_dim = obsmodes[errmode]['Ndim']
# mock observations
if np.any(~np.isfinite(ra)):
if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin):
dd = (xmm[1]-xmm[0])/Nmin
ra = np.arange(xmm[0], xmm[1]+dd, dd)
#ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
#else:
Nobs = np.size(ra)
print(name, Nobs)
err = np.tile(sig_obs, Nobs).reshape(Nobs,-1)
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
if scale:
dp_unit = unity_scale(dp)
dps = [x*y for x,y in zip(dp, dp_unit)]
# calculate derivatives for all parameters
for p in range(Np):
for i, s in enumerate([-1, 1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
for j in range(5):
fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
# populate matrix of derivatives and calculate CRB
for Ndim in data_dim:
#for Ndim in [6,]:
Ndata = Nobs * (Ndim - 1)
cyd = np.empty(Ndata)
dydx = np.empty((Np, Ndata))
dy2 = np.empty((2, Np, Ndata))
for j in range(1, Ndim):
for p in range(Np):
dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
#positive = np.abs(dy)>0
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy))))
if scale:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value)
else:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value)
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs]))))
#print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs]))
cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
# data component of the Fisher matrix
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
dxi = np.matmul(dydx, caux)
# component based on prior knowledge of model parameters
pxi = priors(name, vary)
# full Fisher matrix
cxi = dxi + pxi
if verbose:
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
sx = np.sqrt(np.diag(cx))
print('CRB', sx)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
def priors(name, vary):
"""Return covariance matrix with prior knowledge about parameters"""
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
cprog = mock['prog_prior']
cbary = np.array([0.1*x.value for x in pparams_fid[:5]])**-2
chalo = np.zeros(4)
cdipole = np.zeros(3)
cquad = np.zeros(5)
coctu = np.zeros(7)
priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu}
cprior = np.empty(0)
for v in vary:
cprior = np.concatenate([cprior, priors[v]])
pxi = np.diag(cprior)
return pxi
def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
#dp = read_optimal_step(name, vary)
d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
dydx = d['dydx']
cyd = d['cyd']
y = d['y']
dp = d['dp']
dy = (y[1,:,:] - y[0,:,:])
dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis])
scaling_par = np.median(np.abs(dydx), axis=1)
dydx = dydx / scaling_par[:,np.newaxis]
dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1))
scaling_dim = np.median(np.abs(dydx_), axis=(2,0))
dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis]
cyd_ = np.reshape(cyd, (Ndim-1, -1))
cyd_ = cyd_ / scaling_dim[:,np.newaxis]
cyd = np.reshape(cyd_, (-1))
dydx = np.reshape(dydx_, (len(dp), -1))
mmin = np.min(np.abs(dy), axis=0)
mmax = np.max(np.abs(dy), axis=0)
mmed = np.median(np.abs(dydx), axis=1)
dyn_range = mmax/mmin
#print(dyn_range)
print(np.min(dyn_range), np.max(dyn_range), np.std(dyn_range))
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
cxi = np.matmul(dydx, caux)
print('condition {:e}'.format(np.linalg.cond(cxi)))
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi, maxiter=30)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
def unity_scale(dp):
""""""
dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
return dp_unit
def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
d = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = d['cxi']
N = np.shape(cxi)[0]
cx_ = np.linalg.inv(cxi)
cx = stable_inverse(cxi, verbose=True, maxiter=100)
#cx_ii = stable_inverse(cx, verbose=True, maxiter=50)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('linalg inverse', np.allclose(np.matmul(cx_,cxi), np.eye(N)))
print('stable inverse', np.allclose(np.matmul(cx,cxi), np.eye(N)))
#print(np.matmul(cx,cxi))
#print('inverse inverse', np.allclose(cx_ii, cxi))
def stable_inverse(a, maxiter=20, verbose=False):
"""Invert a matrix with a bad condition number"""
N = np.shape(a)[0]
# guess
q = np.linalg.inv(a)
qa = np.matmul(q,a)
# iterate
for i in range(maxiter):
if verbose: print(i, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N)))
if np.allclose(qa, np.eye(N)):
return q
qai = np.linalg.inv(qa)
q = np.matmul(qai,q)
qa = np.matmul(q,a)
return q
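# Illustrative check of stable_inverse, not part of the analysis pipeline:
# iterative refinement on a notoriously ill-conditioned Hilbert matrix
def stable_inverse_demo(N=10):
    """Compare the standard and iteratively refined inverse of an N x N Hilbert matrix"""
    # Hilbert matrix: a_ij = 1 / (i + j + 1)
    a = np.array([[1./(i + j + 1) for j in range(N)] for i in range(N)])
    print('condition {:g}'.format(np.linalg.cond(a)))
    q0 = np.linalg.inv(a)
    q = stable_inverse(a, maxiter=50)
    print('standard inverse', np.allclose(np.matmul(q0, a), np.eye(N)))
    print('stable inverse', np.allclose(np.matmul(q, a), np.eye(N)))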
def crb_triangle(n, vary, Ndim=6, align=True, plot='all', fast=False, name='gd1', errmode='fiducial'):
    """Show CRB correlations between model parameters in a triangle plot"""
pid, dp, vlabel = get_varied_pars(vary)
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
if align:
alabel = '_align'
else:
alabel = ''
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
#print(cx[0][0])
if plot=='halo':
cx = cx[:4, :4]
params = params[:4]
elif plot=='bary':
cx = cx[4:9, 4:9]
params = params[4:9]
elif plot=='progenitor':
cx = cx[9:, 9:]
params = params[9:]
Nvar = len(params)
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
                theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
plt.gca().add_patch(e)
plt.gca().autoscale_view()
#plt.xlim(-ylim[i],ylim[i])
#plt.ylim(-ylim[j], ylim[j])
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.tight_layout()
plt.savefig('../plots/crb_triangle_{:s}_{:d}_{:s}_{:d}_{:s}.pdf'.format(alabel, n, vlabel, Ndim, plot))
def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'):
"""Show correlations in CRB between a chosen set of parameters in a triangle plot"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
params = params[i0:i1]
if scale:
dp_unit = unity_scale(dp)
#print(dp_unit)
dp_unit = dp_unit[i0:i1]
pid = pid[i0:i1]
label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for l, Ndim in enumerate([3, 4, 6]):
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
#cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
plt.gca().add_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))
def compare_optimal_steps():
""""""
vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad']
vary = ['progenitor', 'bary', 'halo']
for name in ['gd1', 'tri']:
print(name)
print(read_optimal_step(name, vary))
def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
""""""
if first:
store_progparams(name)
wrap_angles(name, save=True)
progenitor_prior(name)
find_greatcircle(name=name)
endpoints(name)
for v in vary:
step_convergence(name=name, Nstep=Nstep, vary=v)
choose_step(name=name, Nstep=Nstep, vary=v)
calculate_crb(name=name, vary=vary, verbose=True)
crb_triangle_alldim(name=name, vary=vary)
########################
# cartesian coordinates
# accelerations
def acc_kepler(x, p=1*u.Msun):
"""Keplerian acceleration"""
r = np.linalg.norm(x)*u.kpc
a = -G * p * 1e11 * r**-3 * x
return a.to(u.pc*u.Myr**-2)
def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
""""""
r = np.linalg.norm(x)*u.kpc
a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
""""""
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2))
return a
def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
""""""
r = np.linalg.norm(x)*u.kpc
q = np.array([1*u.Unit(1), p[2], p[3]])
a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
return a
def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
"""Acceleration due to outside dipole perturbation"""
pv = [x.value for x in p]
a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
return a
def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Acceleration due to outside quadrupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = 0.5*np.sqrt(15/np.pi)
a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1]
a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2]
return a.to(u.pc*u.Myr**-2)
def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Acceleration due to outside octupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
xu = x.unit
pu = p[0].unit
pvec = np.array([i.value for i in p]) * pu
dmat = np.ones((3,7)) * f * pvec * xu**2
x = np.array([i.value for i in x])
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
a = np.einsum('ij->i', dmat) * dmat.unit
return a.to(u.pc*u.Myr**-2)
# derivatives
def der_kepler(x, p=1*u.Msun):
"""Derivative of Kepler potential parameters wrt cartesian components of the acceleration"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
return dmat.value
def pder_kepler(x, p=1*u.Msun):
"""Derivative of cartesian components of the acceleration wrt to Kepler potential parameter"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
return dmat.value
def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
"""Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters"""
p = pu
q = np.array([1, p[2], p[3]])
# physical quantities
r = np.linalg.norm(x)*u.kpc
a = acc_nfw(x, p=pu)
# derivatives
dmat = np.zeros((3, 4))
# Vh
dmat[:,0] = 2*a/p[0]
# Rh
dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
# qy, qz
for i in [1,2]:
dmat[i,i+1] = (-2*a[i]/q[i]).value
return dmat
def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters"""
# coordinates
r = np.linalg.norm(x)*u.kpc
# accelerations
ab = acc_bulge(x, p=pu[:2])
# derivatives
dmat = np.zeros((3, 2))
# Mb
dmat[:,0] = ab/pu[0]
# ab
dmat[:,1] = 2 * ab / (r + pu[1])
return dmat
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters"""
# coordinates
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
aux = np.sqrt(z**2 + pu[2]**2)
# accelerations
ad = acc_disk(x, p=pu)
# derivatives
dmat = np.zeros((3, 3))
# Md
dmat[:,0] = ad / pu[0]
# ad
dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
# bd
dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
return dmat
def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt((4*np.pi)/3)
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt(3/(4*np.pi))
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
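# Illustrative consistency check (assumed helper, not in the original pipeline):
# der_dipole maps accelerations to parameters and pder_dipole parameters to
# accelerations, so their product should be the 3x3 identity
def check_dipole_derivatives(x=np.array([4, 4, 0])*u.kpc):
    """Verify that der_dipole and pder_dipole are mutual inverses"""
    dmat = der_dipole(x)
    pmat = pder_dipole(x)
    print(np.allclose(np.matmul(dmat, pmat.T), np.eye(3)))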
def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a"""
f = 2/np.sqrt(15/np.pi)
s = np.sqrt(3)
x = [1e-3/i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] = np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] = np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] = np.array([0, x[1], 0.5*s*x[2], x[0], 0])
return dmat
def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters"""
f = 0.5*np.sqrt(15/np.pi)
s = 1/np.sqrt(3)
x = [1e-3*i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0])
return dmat
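# Illustrative consistency check (assumed helper): the quadrupole acceleration
# is linear in its parameters, so acc_quad should equal pder_quad contracted
# with the fiducial parameter values
def check_quad_derivatives(x=np.array([4, 4, 0])*u.kpc):
    """Compare direct and derivative-based quadrupole acceleration at position x"""
    p = [pparams_fid[j] for j in range(14,19)]
    pv = np.array([i.value for i in p])
    a_direct = acc_quad(x)
    a_linear = np.matmul(pder_quad(x), pv) * u.pc*u.Myr**-2
    print(np.allclose(a_direct.value, a_linear.value))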
def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters"""
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
x = [1e-3*i.value for i in x]
dmat = np.ones((3,7)) * f
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
return dmat
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
"""Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential"""
pid, dp, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# subset halo parameters
Nhalo = 4
cq = cx[:Nhalo,:Nhalo]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
xi = np.array([-8.3, 0.1, 0.1])*u.kpc
x0, v0 = gd1_coordinates()
#xi = np.array(x0)*u.kpc
d = 50
Nb = 20
x = np.linspace(x0[0]-d, x0[0]+d, Nb)
y = np.linspace(x0[1]-d, x0[1]+d, Nb)
x = np.linspace(-d, d, Nb)
y = np.linspace(-d, d, Nb)
xv, yv = np.meshgrid(x, y)
xf = np.ravel(xv)
yf = np.ravel(yv)
af = np.empty((Nb**2, 3))
plt.close()
fig, ax = plt.subplots(3,3,figsize=(11,10))
dimension = ['x', 'y', 'z']
xlabel = ['y', 'x', 'x']
ylabel = ['z', 'z', 'y']
for j in range(3):
if j==0:
xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
elif j==1:
xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
elif j==2:
xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
for i in range(Nb**2):
#xi = np.array([xf[i], yf[i], x0[2]])*u.kpc
xi = xin[i]*u.kpc
a = acc_nfw(xi)
dqda = halo_accelerations(xi)
cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
if fast:
ca = np.linalg.inv(cai)
else:
ca = stable_inverse(cai)
a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
af[i] = np.abs(a_crb/a)
af[i] = a_crb
for i in range(3):
plt.sca(ax[j][i])
im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
plt.xlabel(xlabel[j]+' (kpc)')
plt.ylabel(ylabel[j]+' (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", size="4%", pad=0.05)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.gca().xaxis.set_ticks_position('top')
cax.tick_params(axis='x', labelsize='xx-small')
if j==0:
plt.title('a$_{}$'.format(dimension[i]), y=4)
plt.tight_layout(rect=[0,0,1,0.95])
plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
def acc_cart(x, components=['bary', 'halo', 'dipole']):
""""""
acart = np.zeros(3) * u.pc*u.Myr**-2
dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
accelerations = []
for c in components:
accelerations += dict_acc[c]
for acc in accelerations:
a_ = acc(x)
acart += a_
return acart
def acc_rad(x, components=['bary', 'halo', 'dipole']):
"""Return radial acceleration"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
a_cart = acc_cart(x, components=components)
a_rad = np.dot(a_cart, trans)
return a_rad
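# Illustrative usage (assumed position, mirrors the point used in crb_ax):
# total Cartesian and radial acceleration from the baryonic and halo components
def acc_example(x=np.array([-8.3, 0.1, 0.1])*u.kpc):
    """Print the total and radial acceleration at position x"""
    print('a =', acc_cart(x, components=['bary', 'halo']))
    print('a_r =', acc_rad(x, components=['bary', 'halo']))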
def ader_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_rad(x, components=['bary', 'halo', 'dipole']):
"""Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
dadq_cart = apder_cart(x, components=components)
dadq_rad = np.einsum('ij,i->j', dadq_cart, trans)
return dadq_rad
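# Illustrative sketch (assumed helper): propagate a parameter covariance
# matrix cq (e.g. a block of an inverted Fisher matrix) to the 1-sigma
# uncertainty of the radial acceleration at position x
def crb_arad(cq, x=np.array([-8.3, 0.1, 0.1])*u.kpc, components=['halo']):
    """Return sqrt( t^T C t ) with t = d a_r / d q evaluated at x"""
    dadq = apder_rad(x, components=components)
    return np.sqrt(np.matmul(dadq, np.matmul(cq, dadq)))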
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = 3e-1
vmax = 1e1
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix, 3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i] = dadq
ca = np.matmul(dadq, np.matmul(cq, dadq.T))
a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2
if relative:
af[i] = np.abs(a_crb/a)
else:
af[i] = a_crb
#print(xi, a_crb)
# save
np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
for i in range(3):
plt.sca(ax[i])
im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm())
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i] + rlabel)
plt.tight_layout()
plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = -0.005
vmax = 0.005
#vmin = 1e-2
#vmax = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
x0, v0 = prog_coords[n]
print(x0)
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
## check orthogonality:
#for i in range(Npot-1):
#for k in range(i+1, Npot):
#print(i, k)
#print(np.dot(vecs[:,i], vecs[:,k]))
#print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]))
# save
np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vmin = 1e-2
vmax = 5e0
norm = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]
label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
vmin = -0.025
vmax = 0.025
norm = None
for i in range(3):
plt.sca(ax[i])
#im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax)
im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm)
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i])
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor))
def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10):
"""Plot acceleration field in R,z plane"""
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
x0 = np.array([4, 4, 0])
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
zin = xin[:,2]
Npix = np.size(xv)
acart_pix = np.empty((Npix, 3))
acyl_pix = np.empty((Npix, 2))
for i in range(Npix):
xi = xin[i]*u.kpc
acart = acc_cart(xi, components=components)
acart_pix[i] = acart
acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
acyl_pix[:,1] = acart_pix[:,2]
plt.close()
plt.figure()
plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1])
plt.tight_layout()
def a_crbcov_vecfield(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all', j=0, align=True, d=20, Nb=10, fast=False, scale=True, relative=False, progenitor=False, batch=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = -0.005
vmax = 0.005
#vmin = 1e-2
#vmax = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
zin = xin[:,2]
Npix = np.size(xv)
acart_pix = np.empty((Npix, 3))
acyl_pix = np.empty((Npix, 2))
vcomb_pix = np.empty((Npix, 2))
af = np.empty((Npix, 3))
derf = np.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
acart_pix[i] = a
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
acyl_pix[:,1] = acart_pix[:,2]
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vmin = 1e-3
vmax = 1e-1
norm = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]*np.sqrt(vals[j])
label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
vmin = -0.025
vmax = 0.025
norm = None
vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(xin[:,0])
#vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(vcomb[0::3])
vcomb_pix[:,1] = vcomb[2::3]
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1], pivot='middle')
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
plt.title('Acceleration {}'.format(component), fontsize='medium')
plt.sca(ax[1])
plt.quiver(Rin, zin, vcomb_pix[:,0], vcomb_pix[:,1], pivot='middle', headwidth=0, headlength=0, headaxislength=0, scale=0.02, scale_units='xy')
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
plt.title('Eigenvector {}'.format(np.abs(j)), fontsize='medium')
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/afield_crbcov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative))
def summary(n, mode='scalar', vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
fn = {'scalar': crb_acart_cov, 'vector': a_crbcov_vecfield}
bins = {'scalar': 30, 'vector': 10}
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
Npars = {'bary': Nbary, 'halo': Nhalo, 'dipole': Ndipole, 'quad': Nquad, 'point': Npoint}
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
Niter = [Npars[x] for x in components]
Niter = sum(Niter) + 1
pp = PdfPages('../plots/acceleration_{}_{}_{}_{}_{}.pdf'.format(n, errmode, vlabel, component, mode))
for i in range(Niter):
print(i, Niter)
fig = fn[mode](-1, progenitor=True, batch=True, errmode=errmode, vary=vary, component=component, j=-i, d=20, Nb=bins[mode])
pp.savefig(fig)
pp.close()
#########
# Summary
def full_names():
""""""
full = {'gd1': 'GD-1', 'atlas': 'ATLAS', 'tri': 'Triangulum', 'ps1a': 'PS1A', 'ps1b': 'PS1B', 'ps1c': 'PS1C', 'ps1d': 'PS1D', 'ps1e': 'PS1E', 'ophiuchus': 'Ophiuchus', 'hermus': 'Hermus', 'kwando': 'Kwando', 'orinoco': 'Orinoco', 'sangarius': 'Sangarius', 'scamander': 'Scamander'}
return full
def full_name(name):
""""""
full = full_names()
return full[name]
def get_done(sort_length=False):
""""""
    #done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'ophiuchus', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']
    # 'ophiuchus' currently excluded
    done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']
# length
if sort_length:
tosort = []
for name in done:
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
tosort += [np.max(mock['xi_range']) - np.min(mock['xi_range'])]
done = [x for _,x in sorted(zip(tosort,done))]
else:
tosort = []
vary = ['progenitor', 'bary', 'halo']
Ndim = 6
errmode = 'fiducial'
align = True
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_vh = myutils.wherein(np.array(pid), np.array([5]))
for name in done:
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
crb = np.sqrt(np.diag(cx))
tosort += [crb[pid_vh]]
done = [x for _,x in sorted(zip(tosort,done))][::-1]
return done
def store_mocks():
""""""
done = get_done()
for name in done:
stream = stream_model(name)
np.save('../data/streams/mock_observed_{}'.format(name), stream.obs)
def period(name):
"""Return orbital period in units of stepsize and number of complete periods"""
orbit = stream_orbit(name=name)
r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
a = np.abs(np.fft.rfft(r))
f = np.argmax(a[1:]) + 1
p = np.size(a)/f
return (p, f)
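# A self-contained sketch of the FFT arithmetic in period() above, using a
# synthetic radius series instead of a real stream orbit (illustrative only;
# it mirrors the estimator rather than asserting its accuracy):
def _period_sketch(nstep=1000, ptrue=250):
    """Mirror the FFT-based period estimate of period() on synthetic data"""
    r = 10 + np.sin(2 * np.pi * np.arange(nstep) / ptrue)
    a = np.abs(np.fft.rfft(r))
    f = np.argmax(a[1:]) + 1
    p = np.size(a) / f
    return (p, f)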
def extract_crbs(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', j=0, align=True, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
tout = Table(names=('name', 'crb'))
pparams0 = pparams_fid
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Np = len(pid_comp)
pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
plt.close()
fig, ax = plt.subplots(Np,1,figsize=(10,15), subplot_kw=dict(projection='mollweide'))
for name in names[:]:
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
crb = np.sqrt(np.diag(cx))
#print([pparams0[pid_comp[i]] for i in range(Np)])
crb_frac = [crb[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)]
print(name, crb_frac)
stream = stream_model(name=name)
for i in range(Np):
plt.sca(ax[i])
color_index = np.array(crb_frac[:])
color_index[color_index>0.2] = 0.2
color_index /= 0.2
color = mpl.cm.viridis(color_index[i])
plt.plot(np.radians(stream.obs[0]), np.radians(stream.obs[1]), 'o', color=color, ms=4)
for i in range(Np):
plt.sca(ax[i])
#plt.xlabel('RA')
plt.ylabel('Dec')
plt.text(0.9, 0.9, '$\Delta$ {}'.format(get_parlabel(pid_comp[i])[0]), fontsize='medium', transform=plt.gca().transAxes, va='bottom', ha='left')
plt.grid()
plt.xlabel('RA')
# add custom colorbar
sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis, norm=plt.Normalize(vmin=0, vmax=20))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
if component=='bary':
cb_pad = 0.1
else:
cb_pad = 0.06
cb = fig.colorbar(sm, ax=ax.ravel().tolist(), pad=cb_pad, aspect=40, ticks=np.arange(0,21,5))
cb.set_label('Cramer $-$ Rao bounds (%)')
#plt.tight_layout()
plt.savefig('../plots/crb_onsky_{}.png'.format(component))
def vhrh_correlation(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', align=True):
""""""
names = get_done()
t = Table.read('../data/crb/ar_orbital_summary.fits')
N = len(names)
p = np.empty(N)
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
i = pid_comp[0]
j = pid_comp[1]
for e, name in enumerate(names):
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
p[e] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j])
plt.close()
plt.figure()
plt.plot(t['rapo'], p, 'ko')
def allstream_2d(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, relative=False):
"""Compare 2D constraints between all streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
N = len(names)
# plot setup
ncol = np.int64(np.ceil(np.sqrt(N)))
nrow = np.int64(np.ceil(N/ncol))
w_ = 8
h_ = 1.1 * w_*nrow/ncol
alpha = 1
lw = 2
frac = [0.8, 0.5, 0.2]
# parameter pairs
paramids = [8, 11, 12, 13, 14]
all_comb = list(itertools.combinations(paramids, 2))
comb = sorted(list(set(all_comb)))
Ncomb = len(comb)
#print(comb)
pp = PdfPages('../plots/allstreams_2d_{}_a{:1d}_{}_r{:1d}.pdf'.format(errmode, align, vlabel, relative))
for c in range(Ncomb):
l, k = comb[c]
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(w_, h_), sharex=True, sharey=True)
for i in range(N):
plt.sca(ax[np.int64(i/ncol)][i%ncol])
for e, Ndim in enumerate([3,4,6]):
color = mpl.cm.bone(frac[e])
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, names[i], align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
cx_2d = np.array([[cx[k][k], cx[k][l]], [cx[l][k], cx[l][l]]])
if relative:
pk = pparams_fid[pid[k]].value
pl = pparams_fid[pid[l]].value
fid_2d = np.array([[pk**2, pk*pl], [pk*pl, pl**2]])
cx_2d = cx_2d / fid_2d * 100**2
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw)
plt.gca().add_patch(e)
txt = plt.text(0.9, 0.9, full_name(names[i]), fontsize='small', transform=plt.gca().transAxes, ha='right', va='top')
txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
if relative:
plt.xlim(-20, 20)
plt.ylim(-20,20)
else:
plt.gca().autoscale_view()
plabels, units = get_parlabel([pid[k],pid[l]])
if relative:
punits = [' (%)' for x in units]
else:
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
for i in range(ncol):
plt.sca(ax[nrow-1][i])
plt.xlabel(params[0])
for i in range(nrow):
plt.sca(ax[i][0])
plt.ylabel(params[1])
for i in range(N, ncol*nrow):
plt.sca(ax[np.int64(i/ncol)][i%ncol])
plt.axis('off')
plt.tight_layout(h_pad=0, w_pad=0)
pp.savefig(fig)
pp.close()
# circular velocity
def pder_vc(x, p=[pparams_fid[j] for j in [0,1,2,3,4,5,6,8,10]], components=['bary', 'halo']):
""""""
N = np.size(x)
# components
if 'bary' in components:
bulge = np.array([G*x*(x+p[1])**-2, -2*G*p[0]*x*(x+p[1])**-3])
aux = p[3] + p[4]
disk = np.array([G*x**2*(x**2 + aux**2)**-1.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5])
nfw = np.array([2*p[5]*(p[6]/x*np.log(1+x.value/p[6].value) - (1+x.value/p[6].value)**-1), p[5]**2*(np.log(1+x.value/p[6].value)/x - (x+p[6])**-1 - x*(x+p[6])**-2), np.zeros(N), np.zeros(N)])
pder = np.vstack([bulge, disk, nfw])
else:
pder = np.array([2*p[0]*(p[1]/x*np.log(1+x.value/p[1].value) - (1+x.value/p[1].value)**-1), p[0]**2*(np.log(1+x.value/p[1].value)/x - (x+p[1])**-1 - x*(x+p[1])**-2), np.zeros(N), np.zeros(N)])
return pder
def delta_vc_vec(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, fast=False, scale=False, ascale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
labels = full_names()
colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
#colors = {'gd1': mpl.cm.bone(0), 'atlas': mpl.cm.bone(0.5), 'tri': mpl.cm.bone(0.8)}
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
for name in names:
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
x = np.linspace(0.01, d, Nb)*u.kpc
Npix = np.size(x)
derf = np.transpose(pder_vc(x, components=components))
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Nb
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
#label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
else:
vcomb = vecs[:,j]*np.sqrt(vals[j])
#label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
mcomb = (vcomb*u.km**2*u.s**-2 * x / G).to(u.Msun)
vc_true = vcirc_potential(x, pparams=pparams_fid)
# relate to orbit
orbit = stream_orbit(name=name)
r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
rmin = np.min(r)
rmax = np.max(r)
rcur = r[0]
r0 = r[-1]
print(name, rcur, r0)
e = (rmax - rmin)/(rmax + rmin)
l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
p, Np = period(name)
np.savez('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dvc=np.sqrt(vcomb), vc=vc_true.value, r=x.value, rperi=rmin, rapo=rmax, rcur=rcur, r0=r0, ecc=e, l=l, p=p, Np=Np)
if ascale:
x = x * rmax**-1
#x = x * rcur**-1
# plot
plt.sca(ax[0])
plt.plot(x, np.sqrt(vcomb), '-', lw=3, color=colors[name], label=labels[name])
#plt.plot(x, vc_true, 'r-')
plt.sca(ax[1])
plt.plot(x, np.sqrt(vcomb)/vc_true, '-', lw=3, color=colors[name], label=labels[name])
#plt.plot(x, mcomb, '-', lw=3, color=colors[name], label=labels[name])
plt.sca(ax[0])
if ascale:
plt.xlim(0,5)
plt.xlabel('r/r$_{apo}$')
else:
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $V_c$ (km s$^{-1}$)')
#plt.ylim(0, 100)
plt.sca(ax[1])
plt.legend(loc=1, frameon=True, handlelength=1, fontsize='small')
if ascale:
plt.xlim(0,5)
plt.xlabel('r/r$_{apo}$')
else:
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $V_c$ / $V_c$')
#plt.ylabel('$\Delta$ $M_{enc}$ ($M_\odot$)')
#plt.ylim(0, 1e11)
plt.tight_layout()
plt.savefig('../plots/vc_r_summary_apo{:d}.pdf'.format(ascale))
def delta_vc_correlations(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, r=False, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
elabel = ''
ylabel = 'min ($\Delta$ $V_c$ / $V_c$)'
if r:
ylabel = 'r(min($\Delta$ $V_c$ / $V_c$)) (kpc)'
elabel = 'r'
names = get_done()
labels = full_names()
colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
plt.close()
fig, ax = plt.subplots(2,3,figsize=(15,9))
for name in names:
d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
rel_dvc = np.min(d['dvc'] / d['vc'])
if r:
idmin = np.argmin(d['dvc'] / d['vc'])
rel_dvc = d['r'][idmin]
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
plt.sca(ax[0][0])
if r:
plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
plt.plot(d['rapo'], rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
plt.xlabel('$r_{apo}$ (kpc)')
plt.ylabel(ylabel)
plt.sca(ax[0][1])
#plt.plot(d['rcur']/d['rapo'], rel_dvc, 'o', ms=10, color=colors[name])
if r:
plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
plt.plot(d['rcur'], rel_dvc, 'o', ms=10, color=colors[name])
#plt.plot(d['r0'], rel_dvc, 'ro')
plt.xlabel('$r_{current}$')
plt.ylabel(ylabel)
plt.sca(ax[0][2])
        #ecc = np.sqrt(1 - (d['rperi']/d['rapo'])**2)
        ecc = d['ecc']
plt.plot(ecc, rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
plt.xlabel('Eccentricity')
plt.ylabel(ylabel)
plt.sca(ax[1][0])
plt.plot(np.median(np.abs(d['l'][:,2])/np.linalg.norm(d['l'], axis=1)), rel_dvc, 'o', ms=10, color=colors[name])
plt.xlabel('|L_z|/|L|')
plt.ylabel(ylabel)
plt.sca(ax[1][1])
plt.plot(d['Np'], rel_dvc, 'o', ms=10, color=colors[name])
#plt.xlabel('$r_{peri}$ (kpc)')
plt.xlabel('Completed periods')
plt.ylabel(ylabel)
plt.sca(ax[1][2])
plt.plot(dlambda, rel_dvc, 'o', ms=10, color=colors[name])
plt.xlabel('$\Delta$ $\\xi$ (deg)')
plt.ylabel(ylabel)
plt.sca(ax[0][2])
plt.legend(fontsize='small', handlelength=0.1)
plt.tight_layout()
plt.savefig('../plots/delta_vc{}_correlations.pdf'.format(elabel))
def collate_orbit(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True):
"""Store all of the properties on streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
N = len(names)
Nmax = len(max(names, key=len))
tname = np.chararray(N, itemsize=Nmax)
vcmin = np.empty(N)
r_vcmin = np.empty(N)
Labs = np.empty((N,3))
lx = np.empty(N)
ly = np.empty(N)
lz = np.empty(N)
Lmod = np.empty(N)
period = np.empty(N)
Nperiod = np.empty(N)
ecc = np.empty(N)
rperi = np.empty(N)
rapo = np.empty(N)
rcur = np.empty(N)
length = np.empty(N)
for e, name in enumerate(names[:]):
d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
idmin = np.argmin(d['dvc'] / d['vc'])
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
tname[e] = name
vcmin[e] = (d['dvc'] / d['vc'])[idmin]
r_vcmin[e] = d['r'][idmin]
if e==0:
Nr = np.size(d['r'])
dvc = np.empty((N, Nr))
vc = np.empty((N, Nr))
r = np.empty((N, Nr))
dvc[e] = d['dvc']
vc[e] = d['dvc'] / d['vc']
r[e] = d['r']
Labs[e] = np.median(np.abs(d['l']), axis=0)
Lmod[e] = np.median(np.linalg.norm(d['l'], axis=1))
lx[e] = np.abs(np.median(d['l'][:,0]/np.linalg.norm(d['l'], axis=1)))
ly[e] = np.abs(np.median(d['l'][:,1]/np.linalg.norm(d['l'], axis=1)))
lz[e] = np.abs(np.median(d['l'][:,2]/np.linalg.norm(d['l'], axis=1)))
period[e] = d['p']
Nperiod[e] = d['Np']
ecc[e] = d['ecc']
rperi[e] = d['rperi']
rapo[e] = d['rapo']
rcur[e] = d['rcur']
length[e] = dlambda
t = Table([tname, vcmin, r_vcmin, dvc, vc, r, Labs, Lmod, lx, ly, lz, period, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'vcmin', 'rmin', 'dvc', 'vc', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur'))
t.pprint()
t.write('../data/crb/vc_orbital_summary.fits', overwrite=True)
# radial acceleration
def ar_r(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39):
"""Calculate precision in radial acceleration as a function of galactocentric radius"""
np.random.seed(seed)
pid, dp_fid, vlabel = get_varied_pars(vary)
components = [c for c in vary if c!='progenitor']
names = get_done()
N = len(names)
Nmax = len(max(names, key=len))
tname = np.chararray(N, itemsize=Nmax)
armin = np.empty((N, Nsight))
r_armin = np.empty((N, Nsight))
Labs = np.empty((N,3))
lx = np.empty(N)
ly = np.empty(N)
lz = np.empty(N)
Lmod = np.empty(N)
period_ = np.empty(N)
Nperiod = np.empty(N)
ecc = np.empty(N)
rperi = np.empty(N)
rapo = np.empty(N)
rcur = np.empty(N)
length = np.empty(N)
Npix = 300
r = np.linspace(0.1, 200, Npix)
dar = np.empty((N, Nsight, Npix))
ar = np.empty((N, Nsight, Npix))
rall = np.empty((N, Nsight, Npix))
plt.close()
fig, ax = plt.subplots(1,3, figsize=(15,5))
for e, name in enumerate(names[:]):
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
cq = cx[6:,6:]
Npot = np.shape(cq)[0]
# relate to orbit
orbit = stream_orbit(name=name)
ro = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
rmin = np.min(ro)
rmax = np.max(ro)
rcur_ = ro[0]
r0 = ro[-1]
e_ = (rmax - rmin)/(rmax + rmin)
l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
p, Np = period(name)
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for s in range(Nsight):
if Nsight==1:
# single sightline
x0 = mock['x0']
xeq = coord.SkyCoord(ra=x0[0], dec=x0[1], distance=x0[2])
xg = xeq.transform_to(coord.Galactocentric)
rg = np.linalg.norm(np.array([xg.x.value, xg.y.value, xg.z.value]))
theta = np.arccos(xg.z.value/rg)
phi = np.arctan2(xg.y.value, xg.x.value)
else:
u_ = np.random.random(1)
v_ = np.random.random(1)
theta = np.arccos(2*u_ - 1)
phi = 2 * np.pi * v_
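                # sampling theta = arccos(2u - 1) makes cos(theta) uniform,
                # i.e. the random sightlines are isotropic on the sphere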
xin = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]).T
arad_pix = np.empty((Npix, 1))
af = np.empty(Npix)
derf = np.empty((Npix, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_rad(xi, components=components)
af[i] = a
dadq = apder_rad(xi, components=components)
derf[i] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
# store
idmin = np.argmin(vcomb / np.abs(af))
armin[e][s] = (vcomb / np.abs(af))[idmin]
r_armin[e][s] = r[idmin]
dar[e][s] = vcomb
ar[e][s] = vcomb / np.abs(af)
rall[e][s] = r
        dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
ARG model
=========
The code is an implementation of ARG model given in [1]_.
Its major features include:
* simulation of stochastic volatility and returns
* estimation using both MLE and GMM
* option pricing
References
----------
.. [1] <NAME> and <NAME> (2014)
"Affine Option Pricing Model in Discrete Time",
working paper, New Economic School.
<http://goo.gl/yRVsZp>
.. [2] <NAME> and <NAME> (2006)
"Autoregressive Gamma Processes",
2006, *Journal of Forecasting*, 25(2), 129–152. doi:10.1002/for.978
.. [3] <NAME>, <NAME>, and <NAME> (2006)
"Structural Laplace Transform and Compound Autoregressive Models"
*Journal of Time Series Analysis*, 27(4), 477–503.
doi:10.1111/j.1467-9892.2006.00479.x
"""
from __future__ import print_function, division
import warnings
import numpy as np
import numexpr as ne
import matplotlib.pylab as plt
import seaborn as sns
import scipy.stats as scs
from scipy.optimize import minimize
import numdifftools as nd
from statsmodels.tsa.tsatools import lagmat
from .argparams import ARGparams
from .helper_functions import days_from_maturity
from mygmm import GMM
from fangoosterlee import cosmethod
__all__ = ['ARG']
class ARG(object):
r"""Class for ARG model.
.. math::
E\left[\left.\exp\left\{ -uY_{t}\right\} \right|Y_{t-1}\right]
=\exp\left\{ -a(u)Y_{t-1}-b(u)\right\}
Attributes
----------
vol
Volatility series
ret
Asset return series
param
Parameters of the model
maturity
Maturity of the option or simply time horizon.
Fraction of a year, i.e. 30/365
riskfree
Risk-free rate of return per day
Methods
-------
afun
a(u) function
bfun
b(u) function
cfun
c(u) function
plot_abc
Vizualize functions a, b, and c
vsim
Simulate ARG(1) volatility process
    vsim2
        Simulate ARG(1) volatility process (alternative implementation)
rsim
Simulate returns given volatility
load_data
Load data to the class
estimate_mle
Estimate model parameters via Maximum Likelihood
estimate_gmm
Estimate model parameters using GMM
cos_restriction
Restrictions used in COS method of option pricing
charfun
Risk-neutral conditional characteristic function (one argument only)
option_premium
Model implied option premium via COS method
"""
def __init__(self, param=None):
"""Initialize class instance.
Parameters
----------
param : ARGparams instance, optional
Parameters of the model
"""
self.vol = None
self.ret = None
self.param = param
self.maturity = None
self.riskfree = None
def convert_to_q(self, param):
"""Convert physical (P) parameters to risk-neutral (Q) parameters.
Parameters
----------
param : ARGparams instance
Physical (P) parameters
Returns
-------
ARGparams instance
Risk-neutral parameters
"""
paramq = ARGparams()
paramq.update(theta=param.get_theta(), price_vol=param.price_vol)
factor = 1/(1 + param.get_scale() \
* (param.price_vol + self.alpha(param.price_ret, param)))
if factor <= 0 or factor > 1/param.rho**.5:
print('Lower bound for theta1 is %.2f'
% self.vol_price_lower_bound(param))
raise ValueError('Invalid parameters in Q conversion!')
delta = param.delta
scale = param.get_scale() * factor
beta = param.get_beta() * factor
rho = scale * beta
mean = scale * delta / (1 - rho)
paramq.update(theta_vol=[mean, rho, delta])
return paramq
def load_data(self, vol=None, ret=None):
"""Load data into the model object.
Parameters
----------
vol : (nobs, ) array
Volatility time series
ret : (nobs, ) array
Return time series
"""
if vol is not None:
self.vol = vol
if ret is not None:
self.ret = ret
def afun(self, uarg, param):
r"""Function a().
.. math::
a\left(u\right)=\frac{\rho u}{1+cu}
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
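
        Examples
        --------
        A quick numerical check of the formula, with illustrative
        (hypothetical) parameter values:

        >>> import numpy as np
        >>> rho, scale = 0.9, 0.1
        >>> u = np.array([0., 1., 10.])
        >>> bool(np.allclose(rho * u / (1 + scale * u), [0., 0.9 / 1.1, 4.5]))
        True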
"""
# return param.rho * uarg / (1 + param.scale * uarg)
rho = param.rho
scale = param.get_scale()
return ne.evaluate("rho * uarg / (1 + scale * uarg)")
def bfun(self, uarg, param):
r"""Function b().
.. math::
b\left(u\right)=\delta\log\left(1+cu\right)
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
# return param.delta * np.log(1 + param.scale * uarg)
scale = param.get_scale()
delta = param.delta
return ne.evaluate("delta * log(1 + scale * uarg)")
def afun_q(self, uarg, param):
r"""Risk-neutral function a().
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return self.afun(uarg, self.convert_to_q(param))
def bfun_q(self, uarg, param):
r"""Risk-neutral function b().
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return self.bfun(uarg, self.convert_to_q(param))
def dafun(self, uarg, param):
r"""Derivative of function a() with respect to scale, rho, and delta.
.. math::
\frac{\partial a}{\partial c}\left(u\right)
&=-\frac{\rho u^2}{\left(1+cu\right)^2} \\
\frac{\partial a}{\partial \rho}a\left(u\right)
&=\frac{u}{1+cu} \\
\frac{\partial a}{\partial \delta}a\left(u\right)
&=0
Parameters
----------
uarg : (nu, ) array
Grid
param : ARGparams instance
Model parameters
Returns
-------
(3, nu) array
"""
da_scale = -param.rho * uarg**2 / (param.get_scale()*uarg + 1)**2
da_rho = uarg / (param.get_scale()*uarg + 1)
da_delta = np.zeros_like(uarg)
return np.vstack((da_scale, da_rho, da_delta))
def dbfun(self, uarg, param):
r"""Derivative of function b() with respect to scale, rho, and delta.
.. math::
\frac{\partial b}{\partial c}\left(u\right)
&=\frac{\delta u}{1+cu} \\
\frac{\partial b}{\partial \rho}\left(u\right)
&=0 \\
\frac{\partial b}{\partial \delta}\left(u\right)
&=\log\left(1+cu\right)
Parameters
----------
uarg : (nu, ) array
Grid
param : ARGparams instance
Model parameters
Returns
-------
(3, nu) array
"""
db_scale = param.delta * uarg / (1 + param.get_scale() * uarg)
db_rho = np.zeros_like(uarg)
db_delta = np.log(1 + param.get_scale() * uarg)
return np.vstack((db_scale, db_rho, db_delta))
def cfun(self, uarg, param):
r"""Function c().
.. math::
c\left(u\right)=\delta\log\left\{1+\frac{cu}{1-\rho}\right\}
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return param.delta * np.log(1 + param.get_scale()*uarg / (1-param.rho))
def center(self, param):
"""No-arb restriction parameter.
Parameters
----------
param : ARGparams instance
Model parameters
Returns
-------
float
Same dimension as uarg
"""
return param.phi / (param.get_scale() * (1 + param.rho))**.5
def psi(self, param):
"""Function psi.
Parameters
----------
param : ARGparams instance
Model parameters
Returns
-------
float
"""
return (param.price_ret-.5) * (1-param.phi**2) + self.center(param)
def alpha(self, uarg, param):
"""Function alpha().
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return self.psi(param) * uarg - .5 * uarg**2 * (1 - param.phi**2)
def beta(self, uarg, param):
"""Function beta(). Same for risk-neutral.
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return uarg * self.afun_q(- self.center(param), param)
def gamma(self, uarg, param):
"""Function gamma(). Same for risk-neutral.
Parameters
----------
uarg : array
Grid
param : ARGparams instance
Model parameters
Returns
-------
array
Same dimension as uarg
"""
return uarg * self.bfun_q(- self.center(param), param)
def lgfun(self, uarg, varg, param):
"""Function l(u, v) and g(u, v) in joint characteristic function.
Parameters
----------
uarg : array
Grid for volatility
varg : array
Grid for returns
param : ARGparams instance
Model parameters
Returns
-------
lfun : array
gfun : array
"""
alpha = self.alpha(varg, param)
lfun = self.afun(uarg + alpha, param) + self.beta(varg, param)
gfun = self.bfun(uarg + alpha, param) + self.gamma(varg, param)
return lfun, gfun
def lgfun_q(self, uarg, varg, param):
"""Function l(u, v) and g(u, v) in joint risk-neutral
characteristic function.
Parameters
----------
uarg : array
Grid for volatility
varg : array
Grid for returns
param : ARGparams instance
Model parameters
Returns
-------
lfun : array
gfun : array
"""
lfun1, gfun1 = self.lgfun(uarg + param.price_vol,
varg + param.price_ret, param)
lfun2, gfun2 = self.lgfun(param.price_vol, param.price_ret, param)
lfun = lfun1 - lfun2
gfun = gfun1 - gfun2
return lfun, gfun
def ch_fun_elements(self, varg, param):
"""Functions psi(v, n) and ups(v, n) in risk-neutral
characteristic function of returns for n periods.
Parameters
----------
varg : array
Grid for returns
param : ARGparams instance
Model parameters
Returns
-------
psi : array
ups : array
"""
periods = days_from_maturity(self.maturity)
ones = np.ones_like(periods)
lfun, gfun = self.lgfun_q(0., varg, param)
varg, psi, ups = varg * ones, lfun * ones, gfun * ones
while True:
if np.array_equal(periods, ones):
return psi, ups
cond = periods > 1
periods[cond] -= 1
lfun, gfun = self.lgfun_q(psi[:, cond], varg[:, cond], param)
ups[:, cond] += gfun
psi[:, cond] = lfun
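    # A minimal sketch of the recursion above for a single scalar maturity;
    # `lfun` and `gfun` stand for the one-period maps (here: lgfun_q).
    # Not used anywhere else in the class -- purely illustrative.
    @staticmethod
    def _cf_recursion_sketch(lfun, gfun, varg, nperiods):
        """Aggregate one-period log-CF coefficients over `nperiods` periods."""
        psi, ups = lfun(0., varg), gfun(0., varg)
        for _ in range(nperiods - 1):
            # both updates use the previous psi (tuple RHS evaluated first)
            psi, ups = lfun(psi, varg), ups + gfun(psi, varg)
        return psi, ups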
def store_ch_fun_elements(self, varg, param):
"""Functions psi(v, n) and ups(v, n) in risk-neutral
characteristic function of returns for n periods.
Parameters
----------
varg : array
Grid for returns
param : ARGparams instance
Model parameters
Returns
-------
psi : array
ups : array
"""
periods = np.max(days_from_maturity(self.maturity))
psi, ups = self.lgfun_q(0., varg, param)
psidict, upsdict = {1: psi}, {1: ups}
while True:
if periods == 1:
return psidict, upsdict
periods -= 1
lfun, gfun = self.lgfun_q(psi, varg, param)
ups += gfun
psi = lfun
psidict[periods], upsdict[periods] = psi, ups
def char_fun_ret_q(self, varg, param):
r"""Conditional risk-neutral Characteristic function (return).
Parameters
----------
varg : array_like
Grid for returns. Real values only.
param : ARGparams instance
Model parameters
Returns
-------
array_like
Characteristic function for each observation and each grid point
Notes
-----
Conditional on :math:`\sigma_t` only
All market data (vol, maturity, riskfree) can be vectors
of the same size, and varg can be a vector of another size,
but of transposed shape,
i.e. vol = np.ones(5), and varg = np.ones((10, 1))
"""
if self.vol is None:
raise ValueError('Volatility is not loaded!')
if self.maturity is None:
raise ValueError('Maturity is not loaded!')
if self.riskfree is None:
raise ValueError('Risk-free rate is not loaded!')
        if np.iscomplex(varg).any():
            raise ValueError('Grid for returns must be real-valued!')
import soundfile as sf
import numpy as np
import os
import io
import shutil
import pytest
import cffi
import sys
import gc
import weakref
# floating point data is typically limited to the interval [-1.0, 1.0],
# but smaller/larger values are supported as well
data_stereo = np.array([[1.75, -1.75],
[1.0, -1.0],
[0.5, -0.5],
[0.25, -0.25]])
data_mono = np.array([0, 1, 2, -2, -1], dtype='int16')
filename_stereo = 'tests/stereo.wav'
filename_mono = 'tests/mono.wav'
filename_raw = 'tests/mono.raw'
filename_new = 'tests/delme.please'
if sys.version_info >= (3, 6):
import pathlib
open_variants = 'name', 'fd', 'obj', 'pathlib'
else:
open_variants = 'name', 'fd', 'obj'
xfail_from_buffer = pytest.mark.xfail(cffi.__version_info__ < (0, 9),
reason="from_buffer() since CFFI 0.9")
def _file_existing(request, filename, fdarg, objarg=None):
if request.param == 'name':
return filename
if request.param == 'pathlib':
return pathlib.Path(filename)
elif request.param == 'fd':
fd = os.open(filename, fdarg)
def finalizer():
with pytest.raises(OSError):
os.close(fd)
request.addfinalizer(finalizer)
return fd
elif request.param == 'obj':
obj = open(filename, objarg, buffering=False)
request.addfinalizer(obj.close)
return obj
def _file_new(request, fdarg, objarg=None):
filename = filename_new
request.addfinalizer(lambda: os.remove(filename))
return _file_existing(request, filename, fdarg, objarg)
def _file_copy(request, filename, fdarg, objarg=None):
shutil.copy(filename, filename_new)
request.addfinalizer(lambda: os.remove(filename_new))
return _file_existing(request, filename_new, fdarg, objarg)
@pytest.fixture(params=open_variants)
def file_stereo_r(request):
return _file_existing(request, filename_stereo, os.O_RDONLY, 'rb')
@pytest.fixture(params=open_variants)
def file_mono_r(request):
return _file_existing(request, filename_mono, os.O_RDONLY, 'rb')
@pytest.fixture(params=open_variants)
def file_w(request):
return _file_new(request, os.O_CREAT | os.O_WRONLY, 'wb')
@pytest.fixture(params=open_variants)
def file_stereo_rplus(request):
return _file_copy(request, filename_stereo, os.O_RDWR, 'r+b')
@pytest.fixture(params=['obj'])
def file_obj_stereo_rplus(request):
return _file_copy(request, filename_stereo, os.O_RDWR, 'r+b')
@pytest.fixture(params=['obj'])
def file_obj_w(request):
return _file_new(request, os.O_CREAT | os.O_WRONLY, 'wb')
@pytest.fixture(params=open_variants)
def file_wplus(request):
return _file_new(request, os.O_CREAT | os.O_RDWR, 'w+b')
@pytest.yield_fixture
def file_inmemory():
with io.BytesIO() as f:
yield f
@pytest.yield_fixture
def sf_stereo_r(file_stereo_r):
with sf.SoundFile(file_stereo_r) as f:
yield f
@pytest.yield_fixture
def sf_stereo_w(file_w):
with sf.SoundFile(file_w, 'w', 44100, 2, format='WAV') as f:
yield f
@pytest.yield_fixture
def sf_stereo_rplus(file_stereo_rplus):
with sf.SoundFile(file_stereo_rplus, 'r+') as f:
yield f
@pytest.yield_fixture
def sf_stereo_wplus(file_wplus):
with sf.SoundFile(file_wplus, 'w+', 44100, 2,
format='WAV', subtype='FLOAT') as f:
yield f
# -----------------------------------------------------------------------------
# Test read() function
# -----------------------------------------------------------------------------
def test_if_read_returns_float64_data(file_stereo_r):
data, fs = sf.read(file_stereo_r)
assert fs == 44100
assert np.all(data == data_stereo)
assert data.dtype == np.float64
def test_read_float32(file_stereo_r):
data, fs = sf.read(file_stereo_r, dtype='float32')
assert np.all(data == data_stereo)
assert data.dtype == np.float32
def test_read_int16(file_mono_r):
data, fs = sf.read(file_mono_r, dtype='int16')
assert np.all(data == data_mono)
assert data.dtype == np.int16
def test_read_int32(file_mono_r):
data, fs = sf.read(file_mono_r, dtype='int32')
assert np.all(data // 2**16 == data_mono)
assert data.dtype == np.int32
def test_read_into_out(file_stereo_r):
out = np.empty((3, 2), dtype='float64')
data, fs = sf.read(file_stereo_r, out=out)
assert data is out
assert np.all(data == data_stereo[:3])
def test_if_read_into_malformed_out_fails(file_stereo_r):
out = np.empty((2, 3), dtype='float64')
with pytest.raises(ValueError):
sf.read(file_stereo_r, out=out)
def test_if_read_into_out_with_too_many_dimensions_fails(file_stereo_r):
out = np.empty((3, 2, 1), dtype='float64')
with pytest.raises(ValueError):
sf.read(file_stereo_r, out=out)
def test_if_read_into_zero_len_out_works(file_stereo_r):
out = np.empty((0, 2), dtype='float64')
data, fs = sf.read(file_stereo_r, out=out)
assert data is out
assert len(out) == 0
def test_read_into_non_contiguous_out(file_stereo_r):
out = np.empty(data_stereo.shape[::-1], dtype='float64')
if getattr(sys, 'pypy_version_info', (999,)) < (2, 6):
# The test for C-contiguous doesn't work with PyPy 2.5.0
sf.read(file_stereo_r, out=out.T)
else:
with pytest.raises(ValueError) as excinfo:
sf.read(file_stereo_r, out=out.T)
assert "C-contiguous" in str(excinfo.value)
def test_read_into_out_with_invalid_dtype(file_stereo_r):
    out = np.empty((3, 2), dtype='int64')
"""
Parallax fitting and computation of distances
"""
import os
import warnings
import collections
from bisect import bisect_left
import h5py
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d
from astropy.coordinates import SkyCoord
from healpy import ang2pix
from dustmaps.sfd import SFDQuery
from dustmaps.bayestar import BayestarWebQuery
from astropy.utils.exceptions import AstropyWarning
import basta.utils_distances as udist
import basta.constants as cnsts
import basta.stats as stats
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Don't print Astropy warnings (catch error caused by mock'ing astropy in Sphinx)
try:
warnings.filterwarnings("ignore", category=AstropyWarning)
except AssertionError:
pass
try:
from basta._dustpath import __dustpath__
except ModuleNotFoundError:
print("\nCannot find path to dustmaps. Did you run 'setup.py'?\n")
raise
def LOS_reddening(distanceparams):
"""
Returns color excess E(B-V) for a line of sight using a
pre-downloaded 3D extinction map provided by Green et al. 2015/2018 - see
http://argonaut.skymaps.info/.
The extinction map is only computed for distance modulus between
:math:`4 < m-M < 19` in units of magnitude.
Parameters
----------
distanceparams : dictionary
Dictionary with distance parameters
Returns
-------
EBV : function
excess color function
"""
if "EBV" in distanceparams:
return lambda x: np.asarray(
np.random.normal(
distanceparams["EBV"][1],
distanceparams["EBV"][2] - distanceparams["EBV"][1],
size=[
len(i) if isinstance(i, collections.Iterable) else 1 for i in [x]
][0],
)
)
frame = distanceparams["dustframe"]
# Convert to galactic coordinates
if frame == "icrs":
ra = distanceparams["RA"]
dec = distanceparams["DEC"]
c = SkyCoord(ra=ra, dec=dec, frame="icrs", unit="deg")
elif frame == "galactic":
lon = distanceparams["lon"]
lat = distanceparams["lat"]
c = SkyCoord(l=lon, b=lat, frame="galactic", unit="deg")
else:
raise ValueError("Unknown dust map frame for computing reddening!")
# Load extinction data cube
pathmap = os.path.join(__dustpath__, "bayestar/bayestar2019.h5")
dcube = h5py.File(pathmap, "r")
# Distance modulus bins
bin_edges = dcube["/pixel_info"].attrs["DM_bin_edges"]
dmbin = bin_edges + (bin_edges[1] - bin_edges[0]) / 2.0
# If webquery fails use local copy of dustmap
try:
bayestar = BayestarWebQuery(version="bayestar2019")
Egr_samples = bayestar(c, mode="samples")
except Exception:
# contains positional info
pinfo = dcube["/pixel_info"][:]
nsides = np.unique(dcube["/pixel_info"][:]["nside"])
# Convert coordinates to galactic frame
lon = c.galactic.l.deg
lat = c.galactic.b.deg
# Convert l,b[deg] to theta,phi[rad]
theta = np.pi / 2.0 - lat * np.pi / 180.0
phi = lon * np.pi / 180.0
# To check if we are within the maps coordinates
Egr_samples = np.array([np.nan])
# Run through nsides
for ncont in reversed(nsides):
healpixNside = ang2pix(ncont, theta, phi, nest=True)
indNside = np.where(np.asarray([x[0] for x in pinfo]) == ncont)[0]
dcubepixNside = [x[1] for x in pinfo[indNside]]
kNside = int(bisect_left(dcubepixNside, healpixNside)) + indNside[0]
if healpixNside == dcubepixNside[kNside - indNside[0]]:
index = kNside
Egr_samples = dcube["/samples"][index]
break
    # If coordinates outside dust map, use Schlegel
if np.isnan(Egr_samples).any():
print("WARNING: Coordinates outside dust map boundaries!")
print("Default to Schegel 1998 dust map")
sfd = SFDQuery()
EBV_fun = lambda x: np.full_like(x, sfd(c))
return EBV_fun
Egr_med, Egr_err = [], []
for i in range(len(dmbin)):
Egr_med.append(np.nanmedian(Egr_samples[:, i]))
Egr_err.append(np.nanstd(Egr_samples[:, i]))
Egr_med_fun = interp1d(
dmbin, Egr_med, bounds_error=False, fill_value=(0, np.max(Egr_med))
)
Egr_err_fun = interp1d(
dmbin, Egr_err, bounds_error=False, fill_value=np.max(Egr_err)
)
dcube.close()
def EBV_fun(dm):
Egr = np.asarray(np.random.normal(Egr_med_fun(dm), Egr_err_fun(dm)))
EBV = cnsts.extinction.Conv_Bayestar * Egr
return EBV
return EBV_fun
def get_EBV(dist, LOS_EBV, debug=False, outfilename=""):
"""
Estimate E(B-V) by drawing distances from a normal parallax
distribution with EDSD prior.
Parameters
-----
dist : array
The drawn distances
LOS_EBV : func
EBV function.
debug : bool, optional
Debug flag.
If True, this function outputs two plots, one of distance modulus
vs. E(B-V) and a histogram of the E(B-V).
outfilename : str, optional
Name of directory of where to put plots outputted if debug is True.
Returns
-------
EBVs : array
E(B-V) at distances
"""
dmod = 5 * np.log10(dist / 10)
EBVs = LOS_EBV(dmod)
if debug:
plt.figure()
plt.plot(dmod, EBVs, ".")
plt.xlabel("dmod")
plt.ylabel("E(B-V)")
plt.savefig(outfilename + "_DEBUG_dmod_EBVs.png")
plt.close()
return EBVs
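# Quick sanity check of the distance-modulus conversion used in get_EBV
# (illustrative only, not part of the BASTA pipeline):
def _dmod_sketch():
    """Return distance moduli for a few example distances in parsec"""
    dist = np.array([10.0, 100.0, 1000.0])
    return 5 * np.log10(dist / 10)  # -> array([ 0.,  5., 10.])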
def get_absorption(EBV, fitparams, filt):
"""
Compute extinction coefficient Rzeta for band zeta.
Using parameterized law from Casagrande & VandenBerg 2014.
Valid for:
logg = 4.1
Teff = 5250 - 7000K
Fe/H = -2.0 - 0.25
a/Fe = -0.4 - 0.4
Assume nominal reddening law with RV=3.1. In a band zeta, Azeta = Rzeta*E(B-V).
Parameters
----------
EBV : array
E(B-V) values
fitparams : dict
The fitting params in inputparams.
filt : str
Name of the given filter
Returns
-------
R*EBV : array
Extinction coefficient times E(B-V)
"""
N = len(EBV)
table = cnsts.extinction.R
i_filter = table["Filter"] == filt
if not any(i_filter) or table["RZ_mean"][i_filter] == 0:
print("WARNING: Unknown extinction coefficient for filter: " + filt)
print(" Using reddening law coefficient R = 0.")
return np.zeros(N)
metal = "MeH" if "MeH" in fitparams else "FeH"
if "Teff" not in fitparams or metal not in fitparams:
R = np.ones_like(EBV) * table["RZ_mean"][i_filter].item()
else:
Teff_val, Teff_err = fitparams["Teff"]
metal_val, metal_err = fitparams[metal]
Teff = np.random.normal(Teff_val, Teff_err, size=N)
FeH = np.random.normal(metal_val, metal_err, size=N)
a0 = table["a0"][i_filter].item()
a1 = table["a1"][i_filter].item()
a2 = table["a2"][i_filter].item()
a3 = table["a3"][i_filter].item()
T4 = 1e-4 * Teff
R = a0 + T4 * (a1 + a2 * T4) + a3 * FeH
return R * EBV
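# Example (illustrative): for a nominal coefficient R = 3.1 and
# E(B-V) = 0.05 mag, the absorption is A = R * E(B-V) = 0.155 mag.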
def add_absolute_magnitudes(
inputparams, n=1000, k=1000, outfilename="", debug=False, use_gaussian_priors=False
):
"""
Convert apparent magnitudes to absolute magnitudes using the distance
Extinction E(B-V) is estimated based on Green et al. (2015) dust map.
Extinction is converted to reddening using Casagrande & VandenBerg 2014.
The converted colors and magnitudes are added to fitsparams.
Parameters
----------
inputparams : dict
Inputparams used in BASTA run.
n : int
Number of samples from parallax range
k : int
Number of samples from apparent magnitude range.
outfilename : str, optional
Name of directory of where to put plots outputted if debug is True.
debug : bool, optional
Debug flag. If True, debugging plots will be outputted.
use_gaussian_priors : bool, optional
If True, gaussian priors will be used for apparent magnitude in
the distance computation.
Returns
-------
inputparams : dict
Modified version of inputparams including absolute magnitudes.
"""
if "parallax" not in inputparams["fitparams"]:
return inputparams
print("\nPreparing distance/parallax/magnitude input ...", flush=True)
qs = [0.158655, 0.5, 0.841345]
fitparams = inputparams["fitparams"]
distanceparams = inputparams["distanceparams"]
    if use_gaussian_priors:
        # Reconstructed block (the original snippet referenced undefined
        # names); assume each observed magnitude enters directly as a
        # Gaussian prior (value, uncertainty) on its filter.
        for filt in distanceparams["m"]:
            val = distanceparams["m"][filt]
            std = distanceparams["m_err"][filt]
            inputparams["fitparams"][filt] = [val, std]
        return inputparams
# Get apparent magnitudes from input data
mobs = distanceparams["m"]
mobs_err = distanceparams["m_err"]
if len(mobs.keys()) == 0:
raise ValueError("No filters were given")
# Convert the inputted parallax in mas to as
plxobs = fitparams["parallax"][0] * 1e-3
plxobs_err = fitparams["parallax"][1] * 1e-3
L = udist.EDSD(None, None) * 1e3
fitparams.pop("parallax")
# Sample distances more densely around the mode of the distance distribution
# See Bailer-Jones 2015, Eq 19
coeffs = [1 / L, -2, plxobs / (plxobs_err**2), -1 / (plxobs_err**2)]
roots = np.roots(coeffs)
if np.sum((np.isreal(roots))) == 1:
(mode,) = np.real(roots[np.isreal(roots)])
else:
assert np.sum((np.isreal(roots))) == 3
if plxobs >= 0:
mode = np.amin(np.real(roots[np.isreal(roots)]))
else:
(mode,) = np.real(roots[roots > 0])
# By sampling linearly in quantiles, the probablity mass is equal for the samples
bla = scipy.stats.norm.cdf(0, loc=mode, scale=1000) + 0.01
dist = scipy.stats.norm.ppf(
np.linspace(bla, 0.96, n - n // 2), loc=mode, scale=1000
)
# We also want to sample across the entire range.
lindist = 10 ** np.linspace(-0.4, 4.4, n // 2)
assert np.all(np.isfinite(dist))
assert np.all(dist > 0)
dist = np.concatenate([dist, lindist])
dist = np.sort(dist)
lldist = udist.compute_distlikelihoods(
dist, plxobs, plxobs_err, L, outfilename=outfilename, debug=debug
)
    dists = np.repeat(dist, k)
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
#if normalize:
# image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
#else:
# image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = (image_numpy + 1) / 2.0
image_numpy = np.clip(image_numpy, 0, 1)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:,:,0]
return image_numpy
# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8):
if n_label == 0:
return tensor2im(label_tensor, imtype)
label_tensor = label_tensor.cpu().float()
if label_tensor.size()[0] > 1:
label_tensor = label_tensor.max(0, keepdim=True)[1]
label_tensor = Colorize(n_label)(label_tensor)
#label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
label_numpy = label_tensor.numpy()
label_numpy = label_numpy / 255.0
return label_numpy
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
###############################################################################
# Code from
# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
# Modified so it complies with the Citscape label map colors
###############################################################################
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
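# e.g. uint82bin(5) -> '00000101'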
def labelcolormap(N):
if N == 35: # cityscape
cmap = np.array([( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), (111, 74, 0), ( 81, 0, 81),
(128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153),
(180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220, 0),
(107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142), ( 0, 0, 70),
( 0, 60,100), ( 0, 0, 90), ( 0, 0,110), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0,142)],
dtype=np.uint8)
else:
        cmap = np.zeros((N, 3), dtype=np.uint8)
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import pandas as pd
import os
# Read data
def main(min_df, max_df, path):
# Read stopwords
with open('stops.txt', 'r') as f:
stops = f.read().split('\n')
print('reading text file...')
processed_sentence = pd.read_table(path, names=['id', 'content_id', 'sentence', 'emotion label', 'date'])
docs = processed_sentence.sentence
# Create count vectorizer
print('counting document frequency of words...')
cvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=None)
cvz = cvectorizer.fit_transform(docs).sign()
# Get vocabulary
print('building the vocabulary...')
sum_counts = cvz.sum(axis=0)
v_size = sum_counts.shape[1]
sum_counts_np = np.zeros(v_size, dtype=int)
for v in range(v_size):
sum_counts_np[v] = sum_counts[0,v]
word2id = dict([(w, cvectorizer.vocabulary_.get(w)) for w in cvectorizer.vocabulary_])
id2word = dict([(cvectorizer.vocabulary_.get(w), w) for w in cvectorizer.vocabulary_])
del cvectorizer
print(' initial vocabulary size: {}'.format(v_size))
# Sort elements in vocabulary
idx_sort = np.argsort(sum_counts_np)
vocab_aux = [id2word[idx_sort[cc]] for cc in range(v_size)]
# Filter out stopwords (if any)
vocab_aux = [w for w in vocab_aux if w not in stops]
    print('  vocabulary size after removing stopwords: {}'.format(len(vocab_aux)))
# Create dictionary and inverse dictionary
vocab = vocab_aux
del vocab_aux
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
# Split in train/test/valid
print('tokenizing documents and splitting into train/test/valid...')
num_docs = cvz.shape[0]
trSize = int(np.floor(0.85*num_docs))
tsSize = int(np.floor(0.10*num_docs))
vaSize = int(num_docs - trSize - tsSize)
del cvz
idx_permute = np.random.permutation(num_docs).astype(int)
# Remove words not in train_data
vocab = list(set([w for idx_d in range(trSize) for w in docs[idx_permute[idx_d]].split() if w in word2id]))
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
print(' vocabulary after removing words not in train: {}'.format(len(vocab)))
docs_tr = [[word2id[w] for w in docs[idx_permute[idx_d]].split() if w in word2id] for idx_d in range(trSize)]
docs_ts = [[word2id[w] for w in docs[idx_permute[idx_d+trSize]].split() if w in word2id] for idx_d in range(tsSize)]
docs_va = [[word2id[w] for w in docs[idx_permute[idx_d+trSize+tsSize]].split() if w in word2id] for idx_d in range(vaSize)]
del docs
print(' number of documents (train): {} [this should be equal to {}]'.format(len(docs_tr), trSize))
print(' number of documents (test): {} [this should be equal to {}]'.format(len(docs_ts), tsSize))
print(' number of documents (valid): {} [this should be equal to {}]'.format(len(docs_va), vaSize))
# Remove empty documents
print('removing empty documents...')
def remove_empty(in_docs):
return [doc for doc in in_docs if doc!=[]]
docs_tr = remove_empty(docs_tr)
docs_ts = remove_empty(docs_ts)
docs_va = remove_empty(docs_va)
# Remove test documents with length=1
docs_ts = [doc for doc in docs_ts if len(doc)>1]
# Split test set in 2 halves
print('splitting test documents in 2 halves...')
docs_ts_h1 = [[w for i,w in enumerate(doc) if i<=len(doc)/2.0-1] for doc in docs_ts]
docs_ts_h2 = [[w for i,w in enumerate(doc) if i>len(doc)/2.0-1] for doc in docs_ts]
# Getting lists of words and doc_indices
print('creating lists of words...')
def create_list_words(in_docs):
return [x for y in in_docs for x in y]
words_tr = create_list_words(docs_tr)
words_ts = create_list_words(docs_ts)
words_ts_h1 = create_list_words(docs_ts_h1)
words_ts_h2 = create_list_words(docs_ts_h2)
words_va = create_list_words(docs_va)
print(' len(words_tr): ', len(words_tr))
print(' len(words_ts): ', len(words_ts))
print(' len(words_ts_h1): ', len(words_ts_h1))
print(' len(words_ts_h2): ', len(words_ts_h2))
print(' len(words_va): ', len(words_va))
# Get doc indices
print('getting doc indices...')
def create_doc_indices(in_docs):
aux = [[j for i in range(len(doc))] for j, doc in enumerate(in_docs)]
return [int(x) for y in aux for x in y]
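    # e.g. create_doc_indices([[5, 7], [3]]) -> [0, 0, 1]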
doc_indices_tr = create_doc_indices(docs_tr)
doc_indices_ts = create_doc_indices(docs_ts)
doc_indices_ts_h1 = create_doc_indices(docs_ts_h1)
doc_indices_ts_h2 = create_doc_indices(docs_ts_h2)
doc_indices_va = create_doc_indices(docs_va)
print(' len(np.unique(doc_indices_tr)): {} [this should be {}]'.format(len(np.unique(doc_indices_tr)), len(docs_tr)))
print(' len(np.unique(doc_indices_ts)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts)), len(docs_ts)))
print(' len(np.unique(doc_indices_ts_h1)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h1)), len(docs_ts_h1)))
    print('  len(np.unique(doc_indices_ts_h2)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h2)), len(docs_ts_h2)))
"""
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..mean_los_velocity_vs_rp import mean_los_velocity_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_mean_los_velocity_vs_rp_correctness1', 'test_mean_los_velocity_vs_rp_correctness2',
'test_mean_los_velocity_vs_rp_correctness3', 'test_mean_los_velocity_vs_rp_correctness4',
'test_mean_los_velocity_vs_rp_parallel', 'test_mean_los_velocity_vs_rp_auto_consistency',
'test_mean_los_velocity_vs_rp_cross_consistency')
fixed_seed = 43
def pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rp_min, rp_max, pi_max, Lbox=None):
""" Brute force pure python function calculating mean los velocities
in a single bin of separation.
"""
if Lbox is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = Lbox, Lbox, Lbox
npts1, npts2 = len(sample1), len(sample2)
running_tally = []
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
dvz = velocities1[i, 2] - velocities2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
zsign_flip = -1
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
zsign_flip = -1
else:
zsign_flip = 1
d_rp = np.sqrt(dx*dx + dy*dy)
if (d_rp > rp_min) & (d_rp < rp_max) & (abs(dz) < pi_max):
if abs(dz) > 0:
vlos = dvz*dz*zsign_flip/abs(dz)
else:
vlos = dvz
running_tally.append(vlos)
if len(running_tally) > 0:
return np.mean(running_tally)
else:
return 0.
def test_mean_los_velocity_vs_rp_vs_brute_force_pure_python():
    """ This function tests that the
    `~halotools.mock_observables.mean_los_velocity_vs_rp` function returns
results that agree with a brute force pure python implementation
for a random distribution of points, both with and without PBCs.
"""
npts = 99
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
sample2 = np.random.random((npts, 3))
velocities1 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
velocities2 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.2, 0.3]), 0.1
############################################
# Run the test with PBCs turned off
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
# ############################################
# # Run the test with PBCs operative
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False, period=1)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness1():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (1, 0, 0.1), the second at (1, 0.2, 0.25).
The first set of points is moving at +50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving towards each other,
and so the relative z-velocity should be -50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -50
npts = 100
xc1, yc1, zc1 = 1, 0, 0.1
xc2, yc2, zc2 = 1, 0.2, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = 50.
rp_bins, pi_max = np.array([0, 0.1, 0.15, 0.21, 0.25]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s2[0:2], 0, rtol=0.01)
assert np.allclose(s2s2[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], 0, rtol=0.01)
assert np.allclose(s1s2[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[2], 0, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
assert np.allclose(s1s2[3], 0, rtol=0.01)
assert np.allclose(s2s2[3], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness2():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.25).
The first set of points is moving at -50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving away from each other,
and so the relative z-velocity should be +50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +50
npts = 100
xc1, yc1, zc1 = 0.5, 0.5, 0.1
xc2, yc2, zc2 = 0.5, 0.35, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = -50.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness3():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.5, 0.55, 0.1), the second at (0.5, 0.4, 0.95).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
    So in this configuration, the second set of points is "gaining ground" on
    the first set in the z-direction, and so the relative z-velocity
should be -20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -20
npts = 100
xc1, yc1, zc1 = 0.5, 0.55, 0.1
xc2, yc2, zc2 = 0.5, 0.4, 0.95
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness4():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.05, 0.05, 0.3), the second at (0.95, 0.95, 0.4).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
So in this configuration, the first set of points is "losing ground" on
the second set in the z-direction, and so the relative z-velocity
should be +20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only one set or the other,
the auto-correlations should be 0 because each set of
points moves coherently.
Note that in this test, PBCs operate in both x & y directions
to identify pairs of points, but PBCs are irrelevant in the z-direction.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +20
npts = 100
xc1, yc1, zc1 = 0.05, 0.05, 0.3
xc2, yc2, zc2 = 0.95, 0.95, 0.4
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_parallel():
"""
Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns identical results for a random distribution of points whether the function
runs in parallel or serial.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1_parallel, s1s2_parallel, s2s2_parallel = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=2, period=1)
s1s1_serial, s1s2_serial, s2s2_serial = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=1, period=1)
assert np.allclose(s1s1_serial, s1s1_parallel, rtol=0.001)
assert np.allclose(s1s2_serial, s1s2_parallel, rtol=0.001)
assert np.allclose(s2s2_serial, s2s2_parallel, rtol=0.001)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_auto_consistency():
""" Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns self-consistent auto-correlation results
regardless of whether we ask for cross-correlations.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1a, s1s2a, s2s2a = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
s1s1b, s2s2b = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2,
do_cross=False)
assert np.allclose(s1s1a, s1s1b, rtol=0.001)
assert np.allclose(s2s2a, s2s2b, rtol=0.001)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_cross_consistency():
""" Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns self-consistent cross-correlation results
regardless of whether we ask for auto-correlations.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1a, s1s2a, s2s2a = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
s1s2b = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2,
do_auto=False)
    assert np.allclose(s1s2a, s1s2b, rtol=0.001)
import numpy as np
import cv2
import os
import argparse
import glob
import math
import matplotlib.pyplot as plt
from ReadCameraModel import *
from UndistortImage import *
def rotationMatrixToEulerAngles(R) :
sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
singular = sy < 1e-6
if not singular :
x = math.atan2(R[2,1] , R[2,2])
y = math.atan2(-R[2,0], sy)
z = math.atan2(R[1,0], R[0,0])
else :
x = math.atan2(-R[1,2], R[1,1])
y = math.atan2(-R[2,0], sy)
z = 0
return np.array([x*180/math.pi, y*180/math.pi, z*180/math.pi])
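# Illustrative sanity check (not part of the original script): a pure rotation
# of 90 degrees about the z-axis should map back to Euler angles (0, 0, 90).
#   Rz = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
#   rotationMatrixToEulerAngles(Rz)  # -> array([ 0.,  0., 90.])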
def multiply_three(a, b, c):
return np.matmul(a, np.matmul(b, c))
def find_f(img1_pts, img2_pts):
    # Each of the 8 correspondences (x, y) <-> (x', y') contributes one row
    # [x*x', x*y', x, y*x', y*y', y, x', y', 1] of the eight-point system M.f = 0.
    M = np.array([[img1_pts[i, 0]*img2_pts[i, 0], img1_pts[i, 0]*img2_pts[i, 1], img1_pts[i, 0],
                   img1_pts[i, 1]*img2_pts[i, 0], img1_pts[i, 1]*img2_pts[i, 1], img1_pts[i, 1],
                   img2_pts[i, 0], img2_pts[i, 1], 1] for i in range(8)])
    U, S, Vh = np.linalg.svd(M, full_matrices=True)
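    # Sketch of the standard eight-point continuation (an assumption -- the
    # original function body is truncated at the SVD above): take the
    # right-singular vector of M with the smallest singular value as the
    # fundamental matrix, then enforce the rank-2 constraint by zeroing the
    # smallest singular value of F.
    F = Vh[-1].reshape(3, 3)
    Uf, Sf, Vhf = np.linalg.svd(F)
    Sf[2] = 0.
    F = multiply_three(Uf, np.diag(Sf), Vhf)
    return F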
'''
python functions to do various useful data processing/manipulation
'''
import numpy as np
from scipy.special import erf
import fitsio
import glob
import os
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
from desitarget.io import read_targets_in_tiles
from desitarget.sv3 import sv3_targetmask
from LSS.Cosmo import distance
def tile2rosette(tile):
if tile < 433:
return (tile-1)//27
else:
if tile >= 433 and tile < 436:
return 13
if tile >= 436 and tile < 439:
return 14
if tile >= 439 and tile < 442:
return 15
if tile >= 442 and tile <=480:
return (tile-442)//3
if tile > 480:
return tile//30
return 999999 #shouldn't be any more?
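# For reference, computed from the rules above: tiles 1-27 map to rosette 0,
# so tile2rosette(1) == 0, tile2rosette(28) == 1, and tile2rosette(433) == 13.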
def calc_rosr(rosn,ra,dec):
    #given a rosette number and ra,dec, calculate the angular distance from the rosette center
roscen = {0:(150.100,2.182),1:(179.6,0),2:(183.1,0),3:(189.9,61.8),4:(194.75,28.2)\
,5:(210.0,5.0),6:(215.5,52.5),7:(217.8,34.4),8:(216.3,-0.6),9:(219.8,-0.6)\
,10:(218.05,2.43),11:(242.75,54.98),12:(241.05,43.45),13:(245.88,43.45),14:(252.5,34.5)\
,15:(269.73,66.02),16:(194.75,24.7),17:(212.8,-0.6),18:(269.73,62.52),19:(236.1,43.45)}
ra = ra*np.pi/180.
dec = dec*np.pi/180.
rac,decc = roscen[rosn]
rac = rac*np.pi/180.
decc = decc*np.pi/180.
cd = np.sin(dec)*np.sin(decc)+np.cos(dec)*np.cos(decc)*np.cos(rac-ra)
ad = np.arccos(cd)*180./np.pi
if ad > 2.5:
print(rosn,ra,dec,rac,decc)
return ad
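# Cross-check sketch (illustrative only; assumes astropy is importable here):
#   from astropy.coordinates import SkyCoord
#   import astropy.units as u
#   c1 = SkyCoord(ra*u.deg, dec*u.deg); c2 = SkyCoord(rac*u.deg, decc*u.deg)
#   c1.separation(c2).deg  # should match calc_rosr's law-of-cosines result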
def combtile_spec(tiles,outf='',rel='daily'):
s = 0
n = 0
if os.path.isfile(outf):
specd = Table.read(outf)
s = 1
tdone = np.unique(specd['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile,zdate in zip(tiles[tmask]['TILEID'],tiles[tmask]['LASTNIGHT']):
zdate = str(zdate)
tspec = combspecdata(tile,zdate,rel=rel)
tspec['TILEID'] = tile
if s == 0:
specd = tspec
s = 1
else:
specd = vstack([specd,tspec],metadata_conflicts='silent')
specd.sort('TARGETID')
kp = (specd['TARGETID'] > 0)
specd = specd[kp]
n += 1
print(tile,n,len(tiles[tmask]),len(specd))
specd.write(outf,format='fits', overwrite=True)
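# Hypothetical invocation (the output path is illustrative, not from the source):
#   combtile_spec(mt, outf=sv3dir+'datcomb_dark_specwdup_Alltiles.fits', rel='daily')
# where mt is a tile table carrying TILEID and LASTNIGHT columns.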
def combspecdata(tile,zdate,specroot='/global/cfs/cdirs/desi/spectro/redux/',rel='daily' ):
#put data from different spectrographs together, one table for fibermap, other for z
coaddir=specroot+rel+'/tiles/cumulative/'
specs = []
#find out which spectrograph have data
for si in range(0,10):
try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
fitsio.read(ff)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
print('spectrographs with data:')
print(specs)
if len(specs) == 0:
return None
tspec = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tn = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tspec = vstack([tspec,tn],metadata_conflicts='silent')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
#tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBER','FIBERSTATUS','PRIORITY','FA_TARGET','FA_TYPE',\
#'OBJTYPE','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','NIGHT','EXPID','MJD','SV3_DESI_TARGET','SV3_BGS_TARGET'])
tspec = join(tspec,tf,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
tspec = join(tspec,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
print(len(tspec),len(tf))
#tspec['LOCATION'] = tf['LOCATION']
#tspec['FIBERSTATUS'] = tf['FIBERSTATUS']
#tspec['PRIORITY'] = tf['PRIORITY']
return tspec
def combfibmap(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrograph have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
return tf
def combfibmap_and_scores(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrograph have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
tf = join(tf,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
return tf
def goodlocdict(tf):
'''
Make a dictionary to map between location and priority
tf should come from combspecdata above
'''
wloc = tf['FIBERSTATUS'] == 0
print(str(len(tf[wloc])) + ' locations with FIBERSTATUS 0')
goodloc = tf[wloc]['LOCATION']
pdict = dict(zip(tf['LOCATION'], tf['PRIORITY'])) #to be used later for randoms
return pdict,goodloc
def cutphotmask(aa,bits):
print(str(len(aa)) +' before imaging veto' )
keep = (aa['NOBS_G']>0) & (aa['NOBS_R']>0) & (aa['NOBS_Z']>0)
for biti in bits:
keep &= ((aa['MASKBITS'] & 2**biti)==0)
aa = aa[keep]
print(str(len(aa)) +' after imaging veto' )
return aa
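# Hypothetical usage (the bit values below are illustrative; the real list is
# tracer/survey specific and is passed in by the caller):
#   targets = cutphotmask(targets, bits=[1, 12, 13])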
def combtiles_wdup(tiles,mdir='',fout='',tarcol=['RA','DEC','TARGETID','SV3_DESI_TARGET','SV3_BGS_TARGET','SV3_MWS_TARGET','SUBPRIORITY','PRIORITY_INIT','TARGET_STATE','TIMESTAMP','ZWARN','PRIORITY']):
s = 0
n = 0
if os.path.isfile(fout):
tarsn = Table.read(fout)
s = 1
tdone = np.unique(tarsn['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ts = str(tile).zfill(6)
faf = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz'
fht = fitsio.read_header(faf)
wt = tiles['TILEID'] == tile
#tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'])
tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'],columns=tarcol)
#tars.keep_columns(tarcols)
#tars = tars[[b for b in tarcol]]
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
tars = join(tars,tt,keys=['TARGETID'])
tars['TILEID'] = tile
tars['ZWARN'].name = 'ZWARN_MTL'
if s == 0:
tarsn = tars
s = 1
else:
tarsn = vstack([tarsn,tars],metadata_conflicts='silent')
tarsn.sort('TARGETID')
n += 1
print(tile,n,len(tiles[tmask]),len(tarsn))
tarsn.write(fout,format='fits', overwrite=True)
def gettarinfo_type(faf,tars,goodloc,pdict,tp='SV3_DESI_TARGET'):
#get target info
#in current files on SVN, TARGETS has all of the necessary info on potential assignments
#no more, so commented out
#tt = Table.read(faf,hdu='TARGETS')
#tt.keep_columns(['TARGETID','FA_TARGET','FA_TYPE','PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
#if len(tt) != len(tfa):
# print('!!!mismatch between targets and potential assignments, aborting!!!')
# return None
#tt = join(tt,tfa,keys=['TARGETID'])
wgt = (np.isin(tt['LOCATION'],goodloc))
print(str(len(np.unique(tt[wgt]['LOCATION']))) + ' good locations')
print('comparison of number targets, number of targets with good locations')
print(len(tt),len(tt[wgt]))
tt = tt[wgt]
tt = join(tt,tars,keys=['TARGETID'],table_names = ['_AVAIL', ''], uniq_col_name='{col_name}{table_name}')
#Mark targets that actually got assigned fibers
tfall = Table.read(faf,hdu='FIBERASSIGN')
tfall.keep_columns(['TARGETID','LOCATION','PRIORITY'])
tt = join(tt,tfall,keys=['TARGETID'],join_type='left',table_names = ['', '_ASSIGNED'], uniq_col_name='{col_name}{table_name}')
wal = tt['LOCATION_ASSIGNED']*0 == 0
tt['LOCATION'][wal] = tt['LOCATION_ASSIGNED'][wal]
tt['LOCATION_AVAIL'][wal] = tt['LOCATION_ASSIGNED'][wal]
#print('differences between assigned locations')
#print(np.unique(tt['LOCATION_AVAIL'][wal]-tt['LOCATION_ASSIGNED'][wal]))
#print(tt.columns)
tt = unique(tt,keys=['TARGETID']) #cut to unique target ids
#print(tarf)
#tars = Table.read(tarf)
#tars.remove_columns(['Z','ZWARN'])#,'PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
#we want to get these from the zbest file that is specific to the tile and thus when it was observed
#tfa = unique(tfa[wgt],keys=['TARGETID'])
#wtype = ((tt[tp] & 2**tarbit) > 0) #don't cut by type here any more
#tt = tt[wtype]
#tfa = join(tfa,tt,keys=['TARGETID'])
#tft = join(tft,tt,keys=['TARGETID'])
#print(str(len(tfa)) +' unique targets with good locations and at '+str(len(np.unique(tfa['LOCATION'])))+' unique locations and '+str(len(tft))+ ' total unique targets at '+str(len(np.unique(tft['LOCATION']))) +' unique locations ')
#wgl = np.isin(tfa['LOCATION_ASSIGNED'],goodloc)
#wtype = ((tfa[tp] & 2**tarbit) > 0)
#wtfa = wgl & wtype
#print('number of assigned fibers at good locations '+str(len(tfa[wtfa])))
wal = tt['LOCATION_ASSIGNED']*0 == 0
print('number of assigned fibers '+str(len(tt[wal])))
print('number of unique target id '+str(len(np.unique(tt[wal]['TARGETID']))))
print('max priority of assigned '+str(np.max(tt[wal]['PRIORITY_ASSIGNED'])))
#tt[wal]['LOCATION'] = tt[wal]['LOCATION_ASSIGNED']
#tt[wal]['LOCATION_AVAIL'] = tt[wal]['LOCATION_ASSIGNED']
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#print('are location_avail and location_assigned the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION_ASSIGNED'], tt[wal]['LOCATION_AVAIL']))
tt['LOCATION_ASSIGNED'] = np.zeros(len(tt),dtype=int)
tt['LOCATION_ASSIGNED'][wal] = 1
wal = tt['LOCATION_ASSIGNED'] == 1
    print('number of assigned fibers '+str(len(tt[wal]))+' (check that this agrees with above)')
    wal = tt['LOCATION']*0 == 0
    print('number of locations from z file '+str(len(tt[wal]))+' (check that this agrees with above)')
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#tt['PRIORITY_ASSIGNED'] = np.vectorize(pdict.__getitem__)(tt['LOCATION'])
return tt
def find_znotposs(dz):
dz.sort('TARGETID')
tidnoz = []
tids = np.unique(dz['TARGETID'])
ti = 0
i = 0
print('finding targetids that were not observed')
while i < len(dz):
za = 0
while dz[i]['TARGETID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tidnoz.append(tids[ti])
if ti%30000 == 0:
print(ti)
ti += 1
selnoz = np.isin(dz['TARGETID'],tidnoz)
tidsb = np.unique(dz[selnoz]['TILELOCID'])
#dz = dz[selnoz]
dz.sort('TILELOCID')
tids = np.unique(dz['TILELOCID'])
print('number of targetids with no obs '+str(len(tidnoz)))
tlidnoz = []
lznposs = []
ti = 0
i = 0
while i < len(dz):
za = 0
while dz[i]['TILELOCID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tlidnoz.append(tids[ti])
#if np.isin(tids[ti],tidsb):
# lznposs.append(tids[ti])
if ti%30000 == 0:
print(ti,len(tids))
ti += 1
#the ones to veto are now the join of the two
wtbtlid = np.isin(tlidnoz,tidsb)
tlidnoz = np.array(tlidnoz)
lznposs = tlidnoz[wtbtlid]
print('number of locations where assignment was not possible because of priorities '+str(len(lznposs)))
return lznposs
def count_tiles_better(fs,dr,pd,rann=0,specrel='daily',fibcol='COADD_FIBERSTATUS'):
'''
from files with duplicates that have already been sorted by targetid, quickly go
through and get the multi-tile information
dr is either 'dat' or 'ran'
    returns a table with TARGETID,NTILE,TILES,TILELOCIDS
'''
#fs = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_specwdup_Alltiles.fits')
#wf = fs['FIBERSTATUS'] == 0
wf = fs[fibcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
if dr == 'dat':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_tarspecwdup_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/datcomb_'+pd+'ntileinfo.fits'
if dr == 'ran':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/rancomb_'+str(rann)+pd+'wdupspec_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/random'+str(rann)+'/rancomb_'+pd+'ntileinfo.fits'
wg = np.isin(fj['TILELOCID'],gtl)
fjg = fj[wg]
tids = np.unique(fjg['TARGETID'])
nloc = []#np.zeros(len(np.unique(f['TARGETID'])))
nt = []
tl = []
tli = []
ti = 0
i = 0
while i < len(fjg):
tls = []
tlis = []
nli = 0
while fjg[i]['TARGETID'] == tids[ti]:
nli += 1
tls.append(fjg[i]['TILEID'])
tlis.append(fjg[i]['TILELOCID'])
i += 1
if i == len(fjg):
break
nloc.append(nli)
tlsu = np.unique(tls)
tlisu = np.unique(tlis)
nt.append(len(tlsu))
tl.append("-".join(tlsu.astype(str)))
tli.append("-".join(tlisu.astype(str)))
if ti%100000 == 0:
print(ti)
ti += 1
tc = Table()
tc['TARGETID'] = tids
tc['NTILE'] = nt
tc['TILES'] = tl
tc['TILELOCIDS'] = tli
return tc
def count_tiles(tiles,catdir,pd,ttp='ALL',imask=False):
'''
For list of tileids, simply track the tiles a target shows up as available in
pd is dark or bright
just output targetid and tiles, meant to be matched to other processing
don't worry about what was assigned, purpose is to just count tile overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun.keep_columns(['TARGETID','TILELOCID'])
print(len(fgun),len(np.unique(fgun['TARGETID'])))
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILES'] = aa
ai = np.chararray(len(fgun),unicode=True,itemsize=300)
tlids = np.copy(fgun['TILELOCID']).astype('<U300')
fgun['TILELOCIDS'] = tlids
if s == 0:
fgu = fgun
s =1
else:
fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu = unique(fgu,keys='TARGETID')#,keep='last')
#I think this works when the ordering is the same; things got messed up other places with sorts
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles),len(fgu))
cnt += 1
fu = fgu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#np.unique()#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print(np.unique(fu['TILES']))
fu.write(catdir+'Alltiles_'+pd+'_tilelocs.dat.fits',format='fits', overwrite=True)
def combtiles(tiles,catdir,tp,tmask,tc='SV3_DESI_TARGET',ttp='ALL',imask=False):
'''
For list of tileids, combine data generated per tile , taking care of overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
if tp != 'dark' and tp != 'bright':
wt = (fgun[tc] & tmask[tp]) > 0
fgun = fgun[wt]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun['TILELOCID_ASSIGNED'] = np.zeros(len(fgun))
wm = fgun['LOCATION_ASSIGNED'] == 1
fgun['TILELOCID_ASSIGNED'][wm] = fgun['TILELOCID'][wm]
nl,nla = countloc(fgun)
fgun['ZPOSS'] = np.zeros(len(fgun)).astype(int)
if tp != 'dark' and tp != 'bright':
#fgun['LOC_NOTBLOCK'] = np.zeros(len(fgun)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
was = ~np.isin(fgun['LOCATION_AVAIL'],locsna)
#fgun['LOC_NOTBLOCK'][was] = 1
wg = was
fgun['ZPOSS'][wg] = 1
#fgun.sort('ZPOSS')
#aa = np.chararray(len(fgun),unicode=True,itemsize=100)
#aa[:] = str(tile)
fgun['TILE'] = int(tile)
#fgun['TILES'] = aa
#tlids = np.copy(fgun['TILELOCID']).astype('<U300')
#fgun['TILELOCIDS'] = tlids
#print('sum of assigned,# of unique TILELOCID (should match)')
#print(np.sum(fgun['LOCATION_ASSIGNED'] == 1),len(np.unique(fgun['TILELOCID'])))
#ai = np.chararray(len(fgun),unicode=True,itemsize=300)
#
#
if s == 0:
fgu = fgun
s =1
else:
#fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
#wn = fgu['PRIORITY_ASSIGNED']*0 != 0
#wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
#fgu[wn]['PRIORITY_ASSIGNED'] = 0
#fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED'] #create this column so assigned always show up in order of highest priority
#wa = fgu['LOCATION_ASSIGNED'] == 1
#wa &= fgu['PRIORITY_ASSIGNED'] >= 2000 #this was put SV2 to ignore BGS repeats
#fa = fgu[wa]
#print(len(fa),len(np.unique(fa['TARGETID'])))
#fgu.sort('sort')
#fgu = unique(fgu,keys='TARGETID',keep='last')
#dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
#didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
#fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if tp != 'dark' and tp != 'bright':
# fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
# fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
#aa[:] = '-'+str(tile)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
#fgu['TILES'][didsc] = ms #add the tile info
#aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
#aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
#fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles))#,np.sum(fgu['LOCATION_ASSIGNED']),len(fgu),len(np.unique(fgu['TILELOCID'])),np.sum(fgu['ZPOSS']))#,np.unique(fgu['TILELOCIDS'])
cnt += 1
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#tlids = np.copy(fgu['TILELOCID']).astype('<U300')
#fgu['TILELOCIDS'] = tlids
tsnrcol = 'TSNR2_'+tp
if tp == 'ELG_HIP':
tsnrcol = 'TSNR2_ELG'
if tp == 'BGS_ANY':
tsnrcol = 'TSNR2_BGS'
wt = (fgu[tsnrcol] == 1e20) | (fgu[tsnrcol]*0 != 0)
print('number with bad tsnrcol is '+str(len(fgu[wt])))
fgu[tsnrcol][wt] = 0
wn = fgu['PRIORITY_ASSIGNED']*0 != 0
wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
    fgu['PRIORITY_ASSIGNED'][wn] = 0 #index the column first so the masked assignment modifies the table in place
fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED']*fgu[tsnrcol] #create this column so assigned always show up in order of highest priority
if tp != 'dark' and tp != 'bright':
#wa = fgu['LOCATION_ASSIGNED'] == 1
#print('ZPOSS for LOCATION_ASSIGNED = 1:')
#print(np.unique(fgu[wa]['ZPOSS']))
fgu['sort'] = fgu['sort']*fgu['ZPOSS']-fgu['ZPOSS']
wa = fgu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fgu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fgu['ZPOSS'] == 1
natloc = ~np.isin(fgu[wp]['TILELOCID'],loclz)
print('number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
fgu.sort('sort')
#fgu.sort('ZPOSS')
fu = unique(fgu,keys='TARGETID')#,keep='last')
tidsu = fu['TARGETID']#[wp][natloc]
tids = fgu['TARGETID']
if tp != 'dark' and tp != 'bright':
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
nalz = ~np.isin(fu['TILELOCID'],loclz)
natloc = wp & nalz#~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
tlocs = fgu['TILELOCID']
ntl = []
ch = 0
bl = 0
print(len(tidsu),len(natloc))
for ii in range(0,len(tidsu)):
#if wp[ii] & natloc[ii]:
if natloc[ii]:
bl += 1
tid = tidsu[ii]
wt = tids == tid
tls = tlocs[wt]
s = 0
for tl in tls:
if s == 0:
if np.isin(tl,loclz):
#wu = fu['TARGETID'] == tid
fu[ii]['TILELOCID'] = tl
#ntl.append(tl)
ch += 1
s = 1
if ii%10000 == 0:
print(ii,len(tidsu),ch,bl)
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
natloc = ~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique and reassignment, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
#print(len(np.unique(fgu['TARGETID'])),np.sum(fgu['LOCATION_ASSIGNED']))
# tiles = fgu['TILES']
# tilesu = fu['TILES']
# tlids = fgu['TILELOCIDS']
# tlidsu = fu['TILELOCIDS']
#
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# otl = tlidsu[ii]
# tt = tiles[wt]
# tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# for ti in tti:
# if ti != otl:
# tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
# fu['TILELOCIDS'] = tlidsu
#
# #wa = fu['LOCATION_ASSIGNED'] == 1
# #wa &= fu['PRIORITY_ASSIGNED'] >= 2000
print(np.sum(fu['LOCATION_ASSIGNED']))
#need to resort tile string
# fl = np.chararray(len(fu),unicode=True,itemsize=100)
# for ii in range(0,len(fu)):
# tl = fu['TILES'][ii]
# tls = tl.split('-')#.astype('int')
# tli = tls[0]
# if len(tls) > 1:
# #tls = tls.astype('int')
# tls.sort()
# tli = tls[0]
# for i in range(1,len(tls)):
# tli += '-'+tls[i]
# #else:
# # tli = tls
# #print(tli)
# fl[ii] = tli
#
# fu['TILES'] = fl
#print(np.unique(fu['TILES']))
# print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
#fu.write(catdir+tp+'Alltiles_'+pd+'_full.dat.fits',format='fits', overwrite=True)
fu.write(catdir+'/datcomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def countloc(aa):
locs = aa['LOCATION_AVAIL']
locsa = aa['LOCATION_ASSIGNED']
la = np.max(locs)+1
nl = np.zeros(la)
nla = np.zeros(la)
for i in range(0,len(aa)):
nl[locs[i]] += 1
nla[locs[i]] += locsa[i]
return nl,nla
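# Vectorized equivalent of the loop above (illustrative):
#   nl  = np.bincount(locs, minlength=la).astype(float)
#   nla = np.bincount(locs, weights=locsa.astype(float), minlength=la)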
def combran_wdup(tiles,rann,randir,tp,sv3dir,specf,keepcols=[]):
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
outf = randir+str(rann)+'/rancomb_'+tp+'wdup_Alltiles.fits'
if os.path.isfile(outf):
fgu = Table.read(outf)
#tarsn.keep_columns(['RA','DEC','TARGETID''LOCATION','FIBER','TILEID'])
s = 1
tdone = np.unique(fgu['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fa = Table.read(ffa,hdu='FAVAIL')
ffna = Table.read(ffna)
fgun = join(fa,ffna,keys=['TARGETID'])
#fgun.remove_columns(delcols)
td += 1
fgun['TILEID'] = int(tile)
fgun.keep_columns(['RA','DEC','TARGETID','LOCATION','FIBER','TILEID'])
if s == 0:
fgu = fgun
s = 1
else:
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu.sort('TARGETID')
print(tile,td, len(tiles), len(fgun),len(fgu))
else:
print('did not find '+ffa)
if len(tiles[tmask]['TILEID']) > 0:
fgu.write(outf,format='fits', overwrite=True)
#specf = Table.read(sv3dir+'datcomb_'+tp+'_specwdup_Alltiles.fits')
specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION']
specf.keep_columns(keepcols)
#specf.keep_columns(['ZWARN','LOCATION','TILEID','TILELOCID','FIBERSTATUS','FIBERASSIGN_X','FIBERASSIGN_Y','PRIORITY','DELTA_X','DELTA_Y','EXPTIME','PSF_TO_FIBER_SPECFLUX','TSNR2_ELG_B','TSNR2_LYA_B','TSNR2_BGS_B','TSNR2_QSO_B','TSNR2_LRG_B','TSNR2_ELG_R','TSNR2_LYA_R','TSNR2_BGS_R','TSNR2_QSO_R','TSNR2_LRG_R','TSNR2_ELG_Z','TSNR2_LYA_Z','TSNR2_BGS_Z','TSNR2_QSO_Z','TSNR2_LRG_Z','TSNR2_ELG','TSNR2_LYA','TSNR2_BGS','TSNR2_QSO','TSNR2_LRG'])
fgu = join(fgu,specf,keys=['LOCATION','TILEID','FIBER'])
fgu.sort('TARGETID')
outf = sv3dir+'/rancomb_'+str(rann)+tp+'wdupspec_Alltiles.fits'
print(outf)
fgu.write(outf,format='fits', overwrite=True)
def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False):
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
for tile,zdate in zip(tiles['TILEID'],tiles['ZDATE']):
tspec = combfibmap_and_scores(tile,zdate)
pdict,gloc = goodlocdict(tspec)
tspec.keep_columns(['LOCATION','FIBERSTATUS','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','TSNR2_ELG','TSNR2_LRG','TSNR2_QSO','TSNR2_BGS'])
dt = ddir+'ALL'+str(tile)+'_full.dat.fits'
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fd = Table.read(dt)
# print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
#gloc = np.unique(fd['LOCATION_AVAIL']) #bad locations already removed from this files
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd),len(gloc))
if tp != 'dark' and tp != 'bright':
wt = (fd[tc] & tmask[tp]) > 0
fd = fd[wt]
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
nl,nla = countloc(fd)
#commenting out zfailure stuff, not vetoing randoms based on that
#wzf = fd['ZWARN'] != 0
#wzf &= fd['ZWARN'] != 999999
#wzf &= fd['ZWARN']*0 == 0
#loc_fail = np.unique(fd[wzf]['LOCATION'])
#print('number of zfail locations',len(loc_fail))
#
#print(np.sum(fd['LOCATION_ASSIGNED']),len(np.unique(fd['LOCATION_AVAIL'])),np.sum(nla),np.sum(nl))
#
#find the locations that were requested by type but not assigned
fa = Table.read(ffa,hdu='FAVAIL')
wg = np.isin(fa['LOCATION'],gloc)
fa = fa[wg]
fa = join(fa,tspec,keys=['LOCATION'],join_type='left')
#fa['FIBER_GOOD'] = np.zeros(len(fa)).astype(int)
#fa['FIBER_GOOD'][wg] = 1
#fa['Z_NOTBAD'] = np.zeros(len(fa)).astype(int)
#wnzf = ~np.isin(fa['LOCATION'],loc_fail)
#fa['Z_NOTBAD'][wnzf] = 1
fa['ZPOSS'] = np.zeros(len(fa)).astype(int)
#fa['ZPOSSNOTBAD'] = np.zeros(len(fa)).astype(int)
if tp != 'dark' and tp != 'bright':
#fa['LOC_NOTBLOCK'] = np.zeros(len(fa)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
ntloc = len(gloc)-len(locsna)#-len(loc_fail)
print('total number of assignable positions',ntloc)
was = ~np.isin(fa['LOCATION'],locsna)
#fa['LOC_NOTBLOCK'][was] = 1
#wg &= was
fa['ZPOSS'][was] = 1
#fa['ZPOSSNOTBAD'][was&wnzf] = 1
#if maskzfail:
# wg &= wnzf
#wzt = wpr & ~wzf & ~wna
#fg = fa[wg]
#print(len(fa),np.sum(fa['ZPOSSNOTBAD']))
#fg = fa
#print('before,after vetoing locations:')
#print(len(fa),len(fg))
#if tp != 'dark' and tp != 'bright':
# fa.sort('ZPOSS')
#else:
# fg.sort('FIBER_GOOD')
fgun = unique(fa,keys=['TARGETID'],keep='last')
ffna = Table.read(ffna)
fgun = join(fgun,ffna,keys=['TARGETID'])
fgun.remove_columns(delcols)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
print(tile,td, len(tiles), str(len(fgun))+' unique new randoms')
td += 1
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILE'] = int(tile)
fgun['TILES'] = aa
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION']
if s == 0:
fgu = fgun
s = 1
else:
fv = vstack([fgu,fgun],metadata_conflicts='silent')
fgo = fgu.copy()
fgu = unique(fv,keys='TARGETID')#,keep='last')
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if this works, can save vetoing until the end
fgu['TSNR2_ELG'][didsc] = np.maximum(fgu['TSNR2_ELG'][didsc],fgun['TSNR2_ELG'][dids])
fgu['TSNR2_QSO'][didsc] = np.maximum(fgu['TSNR2_QSO'][didsc],fgun['TSNR2_QSO'][dids])
fgu['TSNR2_BGS'][didsc] = np.maximum(fgu['TSNR2_BGS'][didsc],fgun['TSNR2_BGS'][dids])
fgu['TSNR2_LRG'][didsc] = np.maximum(fgu['TSNR2_LRG'][didsc],fgun['TSNR2_LRG'][dids])
if tp != 'dark' and tp != 'bright':
#fgu['FIBER_GOOD'][didsc] = np.maximum(fgu['FIBER_GOOD'][didsc],fgun['FIBER_GOOD'][dids])
#fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
#fgu['Z_NOTBAD'][didsc] = np.maximum(fgu['Z_NOTBAD'][didsc],fgun['Z_NOTBAD'][dids])
fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#fgu['ZPOSSNOTBAD'][didsc] = np.maximum(fgu['ZPOSSNOTBAD'][didsc],fgun['ZPOSSNOTBAD'][dids])
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
print(str(len(fgu))+' unique total randoms')
else:
print('did not find '+ffa)
#fgu.sort('ZPOSS')
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#fu = unique(fgu,keys=['TARGETID'])#,keep='last')
fu = fgu
#fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
#return True
# tiles = fgu['TILES']
# tilesu = fu['TILES']
#tlids = fgu['TILELOCIDS']
#tlidsu = fu['TILELOCIDS']
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# #otl = tlidsu[ii]
# tt = tiles[wt]
# #tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# #for ti in tti:
# # if ti != otl:
# # tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
#fu['TILELOCIDS'] = tlidsu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
NT = np.zeros(len(fgu))
ros = np.zeros(len(fgu))
print('counting tiles and finding rosette')
for ii in range(0,len(fu['TILES'])): #not sure why, but this only works when using loop for Table.read but array option works for fitsio.read
NT[ii] = np.char.count(fu['TILES'][ii],'-')+1
ti = int(fu['TILES'][ii].split('-')[0])
ros[ii] = tile2rosette(ti)
fu['NTILE'] = NT
fu['rosette_number'] = ros
print(np.unique(fu['rosette_number'],return_counts=True))
fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def mkfullran(fs,indir,rann,imbits,outf,tp,pd,bit,desitarg='SV3_DESI_TARGET',tsnr= 'TSNR2_ELG',notqso='',qsobit=4,fbcol='COADD_FIBERSTATUS'):
'''
indir is directory with inputs
rann is the random file number (0-17)
imbits are the maskbits for the imaging veto mask
outf is the name (including full path) of the output file
tp is the target type
pd is the program, dark or bright
bit is the bit to use to select to the target type
randir doesn't get used anymore
desitarg is the column to use to select the target type
tsnr is the tsnr2 used for this sample
'''
#first, need to find locations to veto based on data
#the same is done in mkfulldat
#fs = fitsio.read(indir+'datcomb_'+pd+'_specwdup_Alltiles.fits')
wf = fs[fbcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
#gtl now contains the list of good locations
#we now want to load in the bigger data file with all the target info
#we use it to find the locations where observations of the given type were not possible and then mask them
zf = indir+'datcomb_'+pd+'_tarspecwdup_Alltiles.fits'
dz = Table.read(zf)
wtype = ((dz[desitarg] & bit) > 0)
if notqso == 'notqso':
wtype &= ((dz[desitarg] & qsobit) == 0)
wg = np.isin(dz['TILELOCID'],gtl)
dz = dz[wtype&wg]
print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
lznp = find_znotposs(dz)
#lznp will later be used to veto
#load in random file
zf = indir+'/rancomb_'+str(rann)+pd+'wdupspec_Alltiles.fits'
dz = Table.read(zf)
#load in tileloc info for this random file and join it
zfpd = indir+'/rancomb_'+str(rann)+pd+'_Alltilelocinfo.fits'
dzpd = Table.read(zfpd)
dz = join(dz,dzpd,keys=['TARGETID'])
print('length before cutting to good positions '+str(len(dz)))
#cut to good and possible locations
wk = ~np.isin(dz['TILELOCID'],lznp)
wk &= np.isin(dz['TILELOCID'],gtl)
dz = dz[wk]
print('length after cutting to good positions '+str(len(dz)))
#get all the additional columns desired from original random files through join
tarf = Table.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/random'+str(rann)+'/alltilesnofa.fits')
delcols = ['RA','DEC','DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT','NUMOBS_INIT','SCND_TARGET',\
'NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
tarf.remove_columns(delcols)
dz = join(dz,tarf,keys=['TARGETID'])
#apply imaging vetos
dz = cutphotmask(dz,imbits)
print('length after cutting to based on imaging veto mask '+str(len(dz)))
#sort by tsnr, like done for data, so that the highest tsnr are kept
dz.sort(tsnr)
dz = unique(dz,keys=['TARGETID'],keep='last')
print('length after cutting to unique TARGETID '+str(len(dz)))
dz['rosette_number'] = 0
dz['rosette_r'] = 0
for ii in range(0,len(dz)):
rosn = tile2rosette(dz[ii]['TILEID'])
rosd = calc_rosr(rosn,dz[ii]['RA'],dz[ii]['DEC']) #calculates distance in degrees from the rosette center
dz[ii]['rosette_number'] = rosn
dz[ii]['rosette_r'] = rosd
print(np.unique(dz['NTILE']))
dz.write(outf,format='fits', overwrite=True)
def mkfulldat(fs,zf,imbits,tdir,tp,bit,outf,ftiles,azf='',desitarg='SV3_DESI_TARGET',specver='daily',notqso='',qsobit=4,bitweightfile=None):
'''
zf is the name of the file containing all of the combined spec and target info compiled already
imbits is the list of imaging mask bits to mask out
tdir is the directory for the targets
tp is the target type
bit is the SV3_{type}_MASK bit to use for select the correct target type
outf is the full path + name for the output file
ftiles is the name of the file containing information on, e.g., how many tiles each target was available on
azf is the file name for OII flux info (relevant for ELGs only)
desitarg is the column to use for the target type cut (all use SV3_DESI_TARGET except BGS_BRIGHT)
specver is the version of the pipeline used for the redshift info; only 'daily' exists for now
'''
#from desitarget.mtl import inflate_ledger
if tp[:3] == 'BGS' or tp[:3] == 'MWS':
pd = 'bright'
tscol = 'TSNR2_BGS'
else:
pd = 'dark'
tscol = 'TSNR2_ELG'
#load in the appropriate dark/bright combined spec file and use to denote the tileid + location that had good observations:
#fs = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specver+'/datcomb_'+pd+'_specwdup_Alltiles.fits')
if specver == 'daily':
fbcol = 'FIBERSTATUS'
if specver == 'everest':
fbcol = 'COADD_FIBERSTATUS'
wf = fs[fbcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
#gtl now contains the list of 'good' tilelocid
#read in the big combined data file
dz = Table.read(zf)
#find the rows that satisfy the target type
wtype = ((dz[desitarg] & bit) > 0)
if notqso == 'notqso':
print('removing QSO targets')
wtype &= ((dz[desitarg] & qsobit) == 0)
#find the rows that are 'good' tilelocid
wg = np.isin(dz['TILELOCID'],gtl)
print(len(dz[wtype]))
print(len(dz[wg]))
#down-select to target type of interest and good tilelocid
dz = dz[wtype&wg]
print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
print('length of unique targetid after selecting type and fiberstatus == 0 '+str(len(np.unique(dz['TARGETID']))))
#find targets that were never available at the same location as a target of the same type that got assigned to a good location
#those that were never available are assumed to have 0 probability of assignment so we want to veto this location
lznp = find_znotposs(dz)
wk = ~np.isin(dz['TILELOCID'],lznp)#dz['ZPOSS'] == 1
dz = dz[wk] #0 probability locations now vetoed
print('length after priority veto '+str(len(dz)))
print('joining to full imaging')
ftar = Table.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+pd+'_targets.fits')
ftar.keep_columns(['TARGETID','EBV','FLUX_G','FLUX_R','FLUX_Z','FLUX_IVAR_G','FLUX_IVAR_R','FLUX_IVAR_Z','MW_TRANSMISSION_G','MW_TRANSMISSION_R',\
'MW_TRANSMISSION_Z','FRACFLUX_G','FRACFLUX_R','FRACFLUX_Z','FRACMASKED_G','FRACMASKED_R','FRACMASKED_Z','FRACIN_G','FRACIN_R',\
'FRACIN_Z','NOBS_G','NOBS_R','NOBS_Z','PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GALDEPTH_G','GALDEPTH_R','GALDEPTH_Z','FLUX_W1',\
'FLUX_W2','FLUX_IVAR_W1','FLUX_IVAR_W2','MW_TRANSMISSION_W1','MW_TRANSMISSION_W2','ALLMASK_G','ALLMASK_R','ALLMASK_Z','FIBERFLUX_G',\
'FIBERFLUX_R','FIBERFLUX_Z','FIBERTOTFLUX_G','FIBERTOTFLUX_R','FIBERTOTFLUX_Z','WISEMASK_W1','WISEMASK_W2','MASKBITS',\
'RELEASE','BRICKID','BRICKNAME','BRICK_OBJID','MORPHTYPE','PHOTSYS'])
dz = join(dz,ftar,keys=['TARGETID'])
print('length after join to full targets (should be same) '+str(len(dz)))
#apply imaging veto mask
dz = cutphotmask(dz,imbits)
#load in file with information about where repeats occurred and join it
dtl = Table.read(ftiles)
dtl.keep_columns(['TARGETID','NTILE','TILES','TILELOCIDS'])
dz = join(dz,dtl,keys='TARGETID')
#find the rows where we have spectroscopic observations
wz = dz['ZWARN'] != 999999 #this is what the null column becomes
wz &= dz['ZWARN']*0 == 0 #just in case of nans
#mark them as having LOCATION_ASSIGNED
dz['LOCATION_ASSIGNED'] = np.zeros(len(dz)).astype('bool')
dz['LOCATION_ASSIGNED'][wz] = 1
#find the TILELOCID that were assigned and mark them as so
tlids = np.unique(dz['TILELOCID'][wz])
wtl = np.isin(dz['TILELOCID'],tlids)
dz['TILELOCID_ASSIGNED'] = 0
dz['TILELOCID_ASSIGNED'][wtl] = 1
print('number of unique targets at assigned tilelocid:')
print(len(np.unique(dz[wtl]['TARGETID'])))
#get OII flux info for ELGs
if tp == 'ELG' or tp == 'ELG_HIP':
if azf != '':
arz = fitsio.read(azf,columns=[fbcol,'TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR','SUBSET','DELTACHI2'])
st = []
for i in range(0,len(arz)):
st.append(arz['SUBSET'][i][:4])
st = np.array(st)
wg = arz[fbcol] == 0
wg &= st == "thru"
arz = arz[wg]
o2c = np.log10(arz['OII_FLUX'] * np.sqrt(arz['OII_FLUX_IVAR']))+0.2*np.log10(arz['DELTACHI2'])
w = (o2c*0) != 0
w |= arz['OII_FLUX'] < 0
o2c[w] = -20
#arz.keep_columns(['TARGETID','LOCATION','TILEID','o2c','OII_FLUX','OII_SIGMA'])#,'Z','ZWARN','TSNR2_ELG'])
arz = Table(arz)
arz['o2c'] = o2c
dz = join(dz,arz,keys=['TARGETID','LOCATION','TILEID'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['', '_OII'])
dz.remove_columns(['SUBSET','DELTACHI2_OII',fbcol+'_OII'])
print('check length after merge with OII strength file:' +str(len(dz)))
if tp[:3] == 'QSO':
if azf != '':
arz = Table.read(azf)
arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','ZERR','Z_QN'])
print(arz.dtype.names)
#arz['TILE'].name = 'TILEID'
dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
#sort and then cut to unique targetid; sort prioritizes observed targets and then TSNR2
dz['sort'] = dz['LOCATION_ASSIGNED']*dz[tscol]+dz['TILELOCID_ASSIGNED']
dz.sort('sort')
dz = unique(dz,keys=['TARGETID'],keep='last')
if tp == 'ELG' or tp == 'ELG_HIP':
        print('number of masked OII rows (hopefully matches number not assigned) '+ str(np.sum(dz['o2c'].mask)))
if tp == 'QSO':
print('number of good z according to qso file '+str(len(dz)-np.sum(dz['Z'].mask)))
print('length after cutting to unique targetid '+str(len(dz)))
print('LOCATION_ASSIGNED numbers')
print(np.unique(dz['LOCATION_ASSIGNED'],return_counts=True))
print('TILELOCID_ASSIGNED numbers')
print(np.unique(dz['TILELOCID_ASSIGNED'],return_counts=True))
probl = np.zeros(len(dz))
#get completeness based on unique sets of tiles
compa = []
tll = []
ti = 0
    print('getting completeness')
#sorting by tiles makes things quicker with while statements below
dz.sort('TILES')
nts = len(np.unique(dz['TILES']))
tlsl = dz['TILES']
tlslu = np.unique(tlsl)
laa = dz['LOCATION_ASSIGNED']
i = 0
while i < len(dz):
tls = []
tlis = []
nli = 0
nai = 0
while tlsl[i] == tlslu[ti]:
nli += 1 #counting unique targetids within the given TILES value
nai += laa[i] #counting the number assigned
i += 1
if i == len(dz):
break
if ti%1000 == 0:
print('at tiles '+str(ti)+' of '+str(nts))
cp = nai/nli #completeness is number assigned over number total
compa.append(cp)
tll.append(tlslu[ti])
ti += 1
#turn the above into a dictionary and apply it
comp_dicta = dict(zip(tll, compa))
fcompa = []
for tl in dz['TILES']:
fcompa.append(comp_dicta[tl])
dz['COMP_TILE'] = np.array(fcompa)
wc0 = dz['COMP_TILE'] == 0
print('number of targets in 0 completeness regions '+str(len(dz[wc0])))
#get counts at unique TILELOCID
    locl,nlocl = np.unique(dz['TILELOCID'],return_counts=True)
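# --- Illustrative sketch (not from the pipeline above; array names are invented) ---
# The while-loop above computes per-TILES completeness as assigned/total.
# The same reduction can be checked on synthetic data with np.unique and
# np.bincount, assuming TILES strings and a 0/1 LOCATION_ASSIGNED flag:
import numpy as np

tiles = np.array(['1-2', '1-2', '3', '3', '3'])   # hypothetical TILES values
assigned = np.array([1, 0, 1, 1, 0])              # hypothetical LOCATION_ASSIGNED
uniq, inv = np.unique(tiles, return_inverse=True)
n_tot = np.bincount(inv)                          # targets per TILES value
n_ass = np.bincount(inv, weights=assigned)        # assigned per TILES value
comp_tile = (n_ass / n_tot)[inv]                  # broadcast back to each row
print(dict(zip(uniq, n_ass / n_tot)))             # {'1-2': 0.5, '3': 0.666...}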
import copy
from collections import deque
import numpy as np
from domainbed.lib import swa_utils
class SWADBase:
def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
raise NotImplementedError()
def get_final_model(self):
raise NotImplementedError()
class IIDMax(SWADBase):
"""SWAD start from iid max acc and select last by iid max swa acc"""
def __init__(self, evaluator, **kwargs):
self.iid_max_acc = 0.0
self.swa_max_acc = 0.0
self.avgmodel = None
self.final_model = None
self.evaluator = evaluator
def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
if self.iid_max_acc < val_acc:
self.iid_max_acc = val_acc
self.avgmodel = swa_utils.AveragedModel(segment_swa.module, rm_optimizer=True)
self.avgmodel.start_step = segment_swa.start_step
self.avgmodel.update_parameters(segment_swa.module)
self.avgmodel.end_step = segment_swa.end_step
# evaluate
accuracies, summaries = self.evaluator.evaluate(self.avgmodel)
results = {**summaries, **accuracies}
prt_fn(results, self.avgmodel)
swa_val_acc = results["train_out"]
if swa_val_acc > self.swa_max_acc:
self.swa_max_acc = swa_val_acc
self.final_model = copy.deepcopy(self.avgmodel)
def get_final_model(self):
return self.final_model
class LossValley(SWADBase):
"""IIDMax has a potential problem that bias to validation dataset.
LossValley choose SWAD range by detecting loss valley.
"""
def __init__(self, evaluator, n_converge, n_tolerance, tolerance_ratio, **kwargs):
"""
Args:
evaluator
n_converge: converge detector window size.
n_tolerance: loss min smoothing window size
tolerance_ratio: decision ratio for dead loss valley
"""
self.evaluator = evaluator
self.n_converge = n_converge
self.n_tolerance = n_tolerance
self.tolerance_ratio = tolerance_ratio
self.converge_Q = deque(maxlen=n_converge)
self.smooth_Q = deque(maxlen=n_tolerance)
self.final_model = None
self.converge_step = None
self.dead_valley = False
self.threshold = None
def get_smooth_loss(self, idx):
smooth_loss = min([model.end_loss for model in list(self.smooth_Q)[idx:]])
return smooth_loss
@property
def is_converged(self):
return self.converge_step is not None
def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
if self.dead_valley:
return
frozen = copy.deepcopy(segment_swa)
frozen.end_loss = val_loss
self.converge_Q.append(frozen)
self.smooth_Q.append(frozen)
if not self.is_converged:
if len(self.converge_Q) < self.n_converge:
return
            min_idx = np.argmin([model.end_loss for model in self.converge_Q])
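# --- Illustrative sketch (toy loss sequence; the exact criterion is an assumption) ---
# LossValley keeps a rolling window of recent checkpoints and can declare
# convergence once the oldest entry in the window is the loss minimum,
# i.e. the validation loss has stopped improving. Dependency-light sketch:
import numpy as np
from collections import deque

n_converge = 3
window = deque(maxlen=n_converge)
for step, loss in enumerate([1.0, 0.8, 0.9, 0.95, 1.1]):
    window.append((step, loss))
    if len(window) == n_converge:
        min_idx = np.argmin([l for _, l in window])
        if min_idx == 0:              # oldest element in the window is the minimum
            print("converged at step", window[0][0])
            break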
import numpy as np
from sklearn.metrics import accuracy_score
import multiprocessing as mp
import sys, os
sys.path.append(os.getcwd())
try:
from quadboost.weak_learner import _WeakLearnerBase
from quadboost.utils import split_int, timed, ComparableMixin
from quadboost.utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
except ModuleNotFoundError:
from weak_learner import _WeakLearnerBase
from utils import split_int, timed, ComparableMixin
from utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
class MulticlassDecisionStump(_WeakLearnerBase):
"""
Decision stump classifier with innate multiclass algorithm.
    It finds a stump partitioning the examples into 2 parts that minimizes the quadratic multiclass risk.
    It assigns a confidence rate (scalar) to each class for each partition.
Parallelization is implemented for the 'fit' method.
"""
def fit(self, X, Y, W=None, n_jobs=1, sorted_X=None, sorted_X_idx=None):
"""
Fits the model by finding the best decision stump using the algorithm implemented in the StumpFinder class.
Args:
X (Array of shape (n_examples, ...)): Examples
Y (Array of shape (n_examples,) or (n_examples, n_classes)): Labels for the examples. If an encoder was provided at construction, Y should be a vector to be encoded.
W (Array of shape (n_examples, n_classes)): Weights of each examples according to their class. Should be None if Y is not encoded.
n_jobs (int, optional, default=1): Number of processes to execute in parallel to find the stump.
sorted_X (Array of shape (n_examples, ...), optional, default=None): Sorted examples along axis 0. If None, 'X' will be sorted, else it will not.
sorted_X_idx (Array of shape (n_examples, ...), optional, default=None): Indices of the sorted examples along axis 0 (corresponds to argsort). If None, 'X' will be argsorted, else it will not.
Returns self
"""
if self.encoder is not None:
Y, W = self.encoder.encode_labels(Y)
if sorted_X is None or sorted_X_idx is None:
sorted_X, sorted_X_idx = self.sort_data(X)
stump = self.find_stump(sorted_X, sorted_X_idx, Y, W, n_jobs)
for attr in ['feature', 'confidence_rates', 'stump', 'stump_idx', 'risks', 'risk']:
setattr(self, attr, getattr(stump, attr))
return self
def find_stump(self, sorted_X, sorted_X_idx, Y, W, n_jobs):
stump_finder = StumpFinder(sorted_X, sorted_X_idx, Y, W)
stumps_queue = SafeQueue()
if n_jobs > 1: # Need parallelization
n_features = sorted_X.shape[1]
args_iter = ((stumps_queue, sub_idx) for sub_idx in split_int(n_features, n_jobs))
parallel_processes(stump_finder.safe_find_stump, args_iter)
else: # No parallelization
stump_finder.find_stump(stumps_queue)
return min(stump for stump in stumps_queue)
def predict(self, X):
n_partitions, n_classes = self.confidence_rates.shape
n_examples = X.shape[0]
Y_pred = np.zeros((n_examples, n_classes))
for i, partition in enumerate(self.partition_generator(X)):
Y_pred[i] = self.confidence_rates[partition]
return Y_pred
def partition_generator(self, X):
"""
        Partition examples into 2 sets denoted by 0 and 1 in a lazy iterator fashion.
"""
n_examples = X.shape[0]
for x in X.reshape((n_examples, -1)):
yield int(x[self.feature] > self.stump)
def partition(self, X, dtype=bool):
return np.array([p for p in self.partition_generator(X)], dtype=dtype)
@staticmethod
def sort_data(X):
"""
        Necessary sorting operations on the data to find the optimal stump. It is useful to sort the data prior to boosting to speed up the algorithm, since the sorting step will not be repeated at each round.
'sorted_X' and 'sorted_X_idx' should be passed as keyword arguments to the 'fit' method to speed up the algorithm.
"""
X = X.reshape((X.shape[0],-1))
n_examples, n_features = X.shape
sorted_X_idx = np.argsort(X, axis=0)
sorted_X = X[sorted_X_idx, range(n_features)]
return sorted_X, sorted_X_idx
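# --- Illustrative sketch of the column-wise argsort trick used in sort_data ---
# Each feature column is sorted once so every boosting round can reuse the
# ordering; the fancy index X[idx, range(n_features)] picks, for each column,
# the rows in that column's own sorted order:
import numpy as np

X = np.array([[3., 1.], [1., 2.], [2., 0.]])
idx = np.argsort(X, axis=0)
X_sorted = X[idx, range(X.shape[1])]
print(X_sorted)    # each column independently sorted ascending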
class StumpFinder:
"""
Implements the algorithm to find the stump. It is separated from the class MulticlassDecisionStump so that it can be pickled when parallelized with 'multiprocessing' (which uses pickle).
"""
def __init__(self, sorted_X, sorted_X_idx, Y, W):
        # multiprocessing Arrays are shared between processes to avoid pickling overhead
self.sorted_X = np.ctypeslib.as_array(mp.RawArray('d', sorted_X.size)).reshape(sorted_X.shape)
self.sorted_X[:] = sorted_X
self.sorted_X_idx = np.ctypeslib.as_array(mp.RawArray('i', sorted_X_idx.size)).reshape(sorted_X_idx.shape)
self.sorted_X_idx[:] = sorted_X_idx
self.zeroth_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.zeroth_moments[:] = W
self.first_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.first_moments[:] = W*Y
self.second_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.second_moments[:] = self.first_moments*Y
# # multiprocessing Arrays are shared between processed to alleviate pickling
# self.X_shape = sorted_X.shape
# self.X_idx_shape = sorted_X_idx.shape
# self.moments_shape = W.shape
# self.sorted_X = mp.Array('d', sorted_X.reshape(-1))
# self.sorted_X_idx = mp.Array('i', sorted_X_idx.reshape(-1))
# self.zeroth_moments = mp.Array('d', W.reshape(-1))
# self.first_moments = mp.Array('d', (W*Y).reshape(-1))
# self.second_moments = mp.Array('d', (W*Y*Y).reshape(-1))
def safe_find_stump(self, stumps_queue, sub_idx=(None,)):
"""
Handles exception raised in a subprocess so the script will not hang indefinitely.
This is basically a decorator for find_stump, but parallelizing requires pickling, and decorators cannot be pickled.
"""
with stumps_queue: # Context manager handles exceptions
self.find_stump(stumps_queue, sub_idx)
def find_stump(self, stumps_queue, sub_idx=(None,)):
"""
        Algorithm to find the best stump within the sub-array of X specified by the bounds 'sub_idx'.
"""
X = self.sorted_X[:,slice(*sub_idx)]
X_idx = self.sorted_X_idx[:,slice(*sub_idx)]
_, n_classes = self.zeroth_moments.shape
n_examples, n_features = X.shape
n_partitions = 2
n_moments = 3
moments = np.zeros((n_moments, n_partitions, n_features, n_classes))
# At first, all examples are in partition 1
# Moments are not normalized so they can be computed cumulatively
moments[0,1] = np.sum(self.zeroth_moments[X_idx[:,0]], axis=0)
moments[1,1] = np.sum(self.first_moments[X_idx[:,0]], axis=0)
moments[2,1] = np.sum(self.second_moments[X_idx[:,0]], axis=0)
risks = self.compute_risks(moments) # Shape (n_partitions, n_features)
best_stump = Stump(risks, moments)
for i, row in enumerate(X_idx[:-1]):
self.update_moments(moments, row)
possible_stumps = ~np.isclose(X[i+1] - X[i], 0)
if possible_stumps.any():
risk = self.compute_risks(moments[:,:,possible_stumps,:])
best_stump.update(risk, moments, possible_stumps, stump_idx=i+1)
best_stump.compute_stump_value(X)
best_stump.feature += sub_idx[0] if sub_idx[0] is not None else 0
stumps_queue.append(best_stump)
def update_moments(self, moments, row_idx):
moments_update = np.array([self.zeroth_moments[row_idx],
self.first_moments[row_idx],
self.second_moments[row_idx]])
moments[:,0] += moments_update
moments[:,1] -= moments_update
def compute_risks(self, moments):
"""
Computes the risks for each partitions for every features.
"""
moments[np.isclose(moments,0)] = 0
with np.errstate(divide='ignore', invalid='ignore'):
# We could use
# np.divide(moments[1]**2, moments[0], where=~np.isclose(moments[0]))
# However, the buffer size is not big enough for several examples and the resulting division is not done correctly
normalized_m1 = np.nan_to_num(moments[1]**2/moments[0])
            risks = np.sum(moments[2] - normalized_m1, axis=2)
        return risks
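# --- Illustrative sketch (invented toy data) ---
# The moments-based risk above is a weighted quadratic risk per partition:
# sum_k w_k*y_k^2 - (sum_k w_k*y_k)^2 / sum_k w_k, i.e. an (unnormalized)
# weighted variance, summed over classes. Tiny check for one partition:
import numpy as np

W = np.array([[1., 1.], [1., 1.], [1., 1.]])     # 3 examples, 2 classes
Y = np.array([[1., -1.], [1., -1.], [-1., 1.]])
m0, m1, m2 = W.sum(0), (W * Y).sum(0), (W * Y * Y).sum(0)
print(np.sum(m2 - m1 ** 2 / m0))                 # 5.333... for this toy case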
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from utils import utils_sr
import torch
from argparse import ArgumentParser
from utils.utils_restoration import rgb2y, psnr, array2tensor, tensor2array
import sys
from matplotlib.ticker import MaxNLocator
class PnP_restoration():
def __init__(self, hparams):
self.hparams = hparams
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.initialize_cuda_denoiser()
def initialize_cuda_denoiser(self):
'''
Initialize the denoiser model with the given pretrained ckpt
'''
sys.path.append('../GS_denoising/')
from lightning_denoiser import GradMatch
parser2 = ArgumentParser(prog='utils_restoration.py')
parser2 = GradMatch.add_model_specific_args(parser2)
parser2 = GradMatch.add_optim_specific_args(parser2)
hparams = parser2.parse_known_args()[0]
if 'nb_4' in self.hparams.pretrained_checkpoint :
hparams.DRUNET_nb = 4
hparams.grad_matching = self.hparams.grad_matching
hparams.act_mode = 's'
self.denoiser_model = GradMatch(hparams)
checkpoint = torch.load(self.hparams.pretrained_checkpoint, map_location=self.device)
self.denoiser_model.load_state_dict(checkpoint['state_dict'])
self.denoiser_model.eval()
for i, v in self.denoiser_model.named_parameters():
v.requires_grad = False
self.denoiser_model = self.denoiser_model.to(self.device)
if self.hparams.precision == 'double' :
if self.denoiser_model is not None:
self.denoiser_model.double()
def initialize_prox(self, img, degradation):
'''
        Precomputations needed for future prox evaluations
:param img: degraded image
:param degradation: 2D blur kernel for deblurring and SR, mask for inpainting
'''
if self.hparams.degradation_mode == 'deblurring' :
self.k = degradation
self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)
self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate_prox(img, self.k_tensor, 1)
elif self.hparams.degradation_mode == 'SR':
self.k = degradation
self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)
self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate_prox(img, self.k_tensor, 2)
elif self.hparams.degradation_mode == 'inpainting':
self.M = array2tensor(degradation).double().to(self.device)
self.My = self.M*img
else:
print('degradation mode not treated')
def calculate_prox(self, img):
'''
Calculation of the proximal mapping of the data term f
:param img: input for the prox
:return: prox_f(img)
'''
if self.hparams.degradation_mode == 'deblurring':
rho = torch.tensor([1/self.hparams.lamb]).double().repeat(1, 1, 1, 1).to(self.device)
px = utils_sr.prox_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, 1)
elif self.hparams.degradation_mode == 'SR':
rho = torch.tensor([1 /self.hparams.lamb]).double().repeat(1, 1, 1, 1).to(self.device)
px = utils_sr.prox_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, self.hparams.sf)
elif self.hparams.degradation_mode == 'inpainting':
if self.hparams.noise_level_img > 1e-2:
px = (self.hparams.lamb*self.My + img)/(self.hparams.lamb*self.M+1)
else :
px = self.My + (1-self.M)*img
else:
print('degradation mode not treated')
return px
def calculate_grad(self, img):
'''
Calculation of the gradient of the data term f
:param img: input for the prox
:return: \nabla_f(img)
'''
if self.hparams.degradation_mode == 'deblurring' :
grad = utils_sr.grad_solution(img.double(), self.FB, self.FBC, self.FBFy, 1)
if self.hparams.degradation_mode == 'SR' :
grad = utils_sr.grad_solution(img.double(), self.FB, self.FBC, self.FBFy, self.hparams.sf)
return grad
def calculate_regul(self,y,x,g):
'''
Calculation of the regularization (1/tau)*phi_sigma(y)
:param y: Point where to evaluate
:param x: D^{-1}(y)
:param g: Precomputed regularization function value at x
:return: regul(y)
'''
regul = (1 / self.hparams.lamb) * (g - (1 / 2) * torch.norm(x - y, p=2) ** 2)
return regul
def calulate_data_term(self,y,img):
'''
Calculation of the data term value f(y)
:param y: Point where to evaluate F
:param img: Degraded image
:return: f(y)
'''
if self.hparams.degradation_mode == 'deblurring':
deg_y = utils_sr.imfilter(y.double(), self.k_tensor[0].double().flip(1).flip(2).expand(3, -1, -1, -1))
f = 0.5 * torch.norm(img - deg_y, p=2) ** 2
elif self.hparams.degradation_mode == 'SR':
deg_y = utils_sr.imfilter(y.double(), self.k_tensor[0].double().flip(1).flip(2).expand(3, -1, -1, -1))
deg_y = deg_y[..., 0::self.hparams.sf, 0::self.hparams.sf]
f = 0.5 * torch.norm(img - deg_y, p=2) ** 2
elif self.hparams.degradation_mode == 'inpainting':
deg_y = self.M * y.double()
f = 0.5 * torch.norm(img - deg_y, p=2) ** 2
else:
print('degradation not implemented')
return f
def calculate_F(self, y, x, g, img):
'''
Calculation of the objective function value f(y) + (1/tau)*phi_sigma(y)
:param y: Point where to evaluate F
:param x: D^{-1}(y)
:param g: Precomputed regularization function value at x
:param img: Degraded image
:return: F(y)
'''
regul = self.calculate_regul(y,x,g)
if self.hparams.no_data_term:
F = regul
else:
f = self.calulate_data_term(y,img)
F = f + regul
return F.item()
def calculate_lyapunov_DRS(self,y,z,x,g,img):
'''
Calculation of the Lyapunov function value Psi(x)
:param x: Point where to evaluate F
:param y,z: DRS iterations initialized at x
:param g: Precomputed regularization function value at x
:param img: Degraded image
:return: Psi(x)
'''
regul = self.calculate_regul(y,x,g)
f = self.calulate_data_term(z, img)
Psi = regul + f + (1 / self.hparams.lamb) * (torch.sum(torch.mul(y-x,y-z)) + (1/2) * torch.norm(y - z, p=2) ** 2)
return Psi
def restore(self, img, init_im, clean_img, degradation,extract_results=False):
'''
Compute GS-PnP restoration algorithm
:param img: Degraded image
:param clean_img: ground-truth clean image
:param degradation: 2D blur kernel for deblurring and SR, mask for inpainting
:param extract_results: Extract information for subsequent image or curve saving
'''
if extract_results:
y_list, z_list, x_list, Dg_list, psnr_tab, g_list, Dx_list, F_list, Psi_list = [], [], [], [], [], [], [], [], []
i = 0 # iteration counter
img_tensor = array2tensor(init_im).to(self.device) # for GPU computations (if GPU available)
self.initialize_prox(img_tensor, degradation) # prox calculus that can be done outside of the loop
# Initialization of the algorithm
if self.hparams.degradation_mode == 'SR':
x0 = cv2.resize(init_im, (img.shape[1] * self.hparams.sf, img.shape[0] * self.hparams.sf),interpolation=cv2.INTER_CUBIC)
x0 = utils_sr.shift_pixel(x0, self.hparams.sf)
x0 = array2tensor(x0).to(self.device)
else:
x0 = array2tensor(init_im).to(self.device)
if extract_results: # extract np images and PSNR values
out_x = tensor2array(x0.cpu())
current_x_psnr = psnr(clean_img, out_x)
if self.hparams.print_each_step:
print('current x PSNR : ', current_x_psnr)
psnr_tab.append(current_x_psnr)
x_list.append(out_x)
x = x0
if self.hparams.use_hard_constraint:
x = torch.clamp(x, 0, 1)
# Initialize Lyapunov
diff_Psi = 1
Psi_old = 1
Psi = Psi_old
while i < self.hparams.maxitr and abs(diff_Psi)/Psi_old > self.hparams.relative_diff_Psi_min:
if self.hparams.inpainting_init :
if i < self.hparams.n_init:
self.sigma_denoiser = 50
else :
self.sigma_denoiser = self.hparams.sigma_denoiser
else :
self.sigma_denoiser = self.hparams.sigma_denoiser
x_old = x
Psi_old = Psi
if self.hparams.PnP_algo == 'PGD':
# Gradient step
gradx = self.calculate_grad(x_old)
z = x_old - self.hparams.lamb*gradx
# Denoising step
torch.set_grad_enabled(True)
Dg, N = self.denoiser_model.calculate_grad(z, self.hparams.sigma_denoiser / 255.)
torch.set_grad_enabled(False)
Dg = Dg.detach()
N = N.detach()
g = 0.5 * (torch.norm(z.double() - N.double(), p=2) ** 2)
Dz = z - Dg
Dx = Dz
x = (1 - self.hparams.alpha) * z + self.hparams.alpha*Dz
y = x
# Hard constraint
if self.hparams.use_hard_constraint:
x = torch.clamp(x,0,1)
# Calculate Objective
F = self.calculate_F(x, z, g, img_tensor)
elif self.hparams.PnP_algo == 'DRS':
# Denoising step
torch.set_grad_enabled(True)
Dg, N = self.denoiser_model.calculate_grad(x_old, self.hparams.sigma_denoiser / 255.)
torch.set_grad_enabled(False)
Dg = Dg.detach()
N = N.detach()
g = 0.5 * (torch.norm(x_old.double() - N.double(), p=2) ** 2)
Dx = x_old - Dg
y = (1 - self.hparams.alpha)*x_old + self.hparams.alpha*Dx
# Hard constraint
if self.hparams.use_hard_constraint:
y = torch.clamp(y,0,1)
# Proximal step
z = self.calculate_prox(2*y-x_old)
# Calculate Lyapunov
Psi = self.calculate_lyapunov_DRS(y,z,x,g,img_tensor)
diff_Psi = Psi-Psi_old
# Calculate Objective
F = self.calculate_F(y, x, g, img_tensor)
# Final step
x = x_old + (z-y)
elif self.hparams.PnP_algo == 'DRSdiff':
# Proximal step
y = self.calculate_prox(x_old)
y2 = 2*y-x_old
# Denoising step
torch.set_grad_enabled(True)
Dg, N = self.denoiser_model.calculate_grad(y2, self.hparams.sigma_denoiser / 255.)
torch.set_grad_enabled(False)
Dg = Dg.detach()
N = N.detach()
g = 0.5 * (torch.norm(y2.double() - N.double(), p=2) ** 2)
Dx = y2 - Dg
z = (1 - self.hparams.alpha) * y2 + self.hparams.alpha * Dx
# Hard constraint
if self.hparams.use_hard_constraint:
z = torch.clamp(z, 0, 1)
# Calculate Lyapunov
Psi = self.calculate_lyapunov_DRS(y,z,x,g,img_tensor)
diff_Psi = Psi-Psi_old
# Calculate Objective
F = self.calculate_F(y, x, g, img_tensor)
# Final step
x = x_old + (z-y)
else :
print('algo not implemented')
# Logging
if extract_results:
out_y = tensor2array(y.cpu())
out_z = tensor2array(z.cpu())
out_x = tensor2array(x.cpu())
current_y_psnr = psnr(clean_img, out_y)
current_z_psnr = psnr(clean_img, out_z)
current_x_psnr = psnr(clean_img, out_x)
if self.hparams.print_each_step:
print('iteration : ', i)
print('current y PSNR : ', current_y_psnr)
print('current z PSNR : ', current_z_psnr)
print('current x PSNR : ', current_x_psnr)
y_list.append(out_y)
x_list.append(out_x)
z_list.append(out_z)
Dx_list.append(tensor2array(Dx.cpu()))
Dg_list.append(torch.norm(Dg).cpu().item())
g_list.append(g.cpu().item())
psnr_tab.append(current_x_psnr)
F_list.append(F)
Psi_list.append(Psi)
# next iteration
i += 1
output_img = tensor2array(y.cpu())
output_psnr = psnr(clean_img, output_img)
output_psnrY = psnr(rgb2y(clean_img), rgb2y(output_img))
if extract_results:
return output_img, output_psnr, output_psnrY, x_list, np.array(z_list), np.array(y_list), np.array(Dg_list), np.array(psnr_tab), np.array(Dx_list), np.array(g_list), np.array(F_list), np.array(Psi_list)
else:
return output_img, output_psnr, output_psnrY
def initialize_curves(self):
self.conv = []
self.PSNR = []
self.g = []
self.Dg = []
self.F = []
self.Psi = []
self.lip_algo = []
self.lip_D = []
self.lip_Dg = []
def update_curves(self, x_list, Dx_list, psnr_tab, Dg_list, g_list, F_list, Psi_list):
self.F.append(F_list)
self.Psi.append(Psi_list)
self.g.append(g_list)
self.Dg.append(Dg_list)
self.PSNR.append(psnr_tab)
self.conv.append(np.array([(np.linalg.norm(x_list[k + 1] - x_list[k]) ** 2) for k in range(len(x_list) - 1)]) / np.sum(np.abs(x_list[0]) ** 2))
        self.lip_algo.append(np.sqrt(np.array([np.sum(np.abs(x_list[k + 1] - x_list[k]) ** 2) for k in range(1, len(x_list) - 1)]) / np.array([np.sum(np.abs(x_list[k] - x_list[k - 1]) ** 2) for k in range(1, len(x_list) - 1)])))
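# --- Illustrative sketch (fake iterates; shapes are assumptions) ---
# update_curves tracks the relative residual ||x_{k+1}-x_k||^2 / ||x_0||^2
# across PnP iterates, the standard convergence diagnostic used here:
import numpy as np

x_list = [np.ones((4, 4)) / (k + 1) for k in range(5)]    # fake iterates
conv = np.array([np.linalg.norm(x_list[k + 1] - x_list[k]) ** 2
                 for k in range(len(x_list) - 1)]) / np.sum(np.abs(x_list[0]) ** 2)
print(conv)    # decreasing values indicate the iteration is settling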
from keras.utils import to_categorical, Sequence
from rdkit import Chem
from rdkit.Chem import rdmolops, AllChem
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import math
from sklearn import preprocessing
from model.utils_ import read_csv, read_csv2, read_griddata, normalized_laplacian, normalize_adj, scaled_laplacian, adjacency, gen_conformer
from scipy.spatial import cKDTree
from grid import scaffoldSplit as scaffoldsplit
def one_hot(x, allowable_set):
# If x is not in allowed set, use last index
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
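# Usage note (illustrative): symbols outside the allowable set collapse onto
# the last slot, which acts as an "other" bucket.
#   one_hot('C',  ['C', 'N', 'O'])  ->  [True, False, False]
#   one_hot('Se', ['C', 'N', 'O'])  ->  [False, False, True]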
class grid_Dataset(object):
def __init__(self, dataset, batch=128):
self.dataset = dataset
self.task = "binary"
self.target_name = "active"
self.max_atoms = 3
self.batch = batch
self.outputs = 1
self.smiles = []
self.mols = []
self.coords = []
self.target = []
self.rlist = []
self.gridx = []
self.x, self.y, self.grid3d = {}, {}, {}
self.gridshape = ()
self.use_atom_symbol = True
self.use_degree = True
self.use_hybridization = True
self.use_implicit_valence = True
self.use_partial_charge = False
self.use_formal_charge = True
self.use_ring_size = True
self.use_hydrogen_bonding = True
self.use_acid_base = True
self.use_aromaticity = True
self.use_chirality = True
self.use_num_hydrogen = True
# Load data
self.load_grid_dataset()
# Normalize
if self.task == "regression":
self.mean = np.mean(self.y["train"])
self.std = np.std(self.y["train"])
self.y["train"] = (self.y["train"] - self.mean) / self.std
self.y["valid"] = (self.y["valid"] - self.mean) / self.std
self.y["test"] = (self.y["test"] - self.mean) / self.std
else:
self.mean = 0
self.std = 1
def load_grid_dataset(self):
# Dataset parameters
if self.dataset == "bace_reg" or self.dataset == "delaney" or self.dataset == "freesolv":
self.task = "regression"
# self.target_name = "target"
elif self.dataset == "hiv":
self.task = "binary"
else:
pass
if self.dataset == "delaney":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_delaney11")
elif self.dataset == "freesolv":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_freesolv_rotate_5")
elif self.dataset == "hiv":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_hiv_rotate")
t = np.array(grid_y)
t1=0
t2=0
for h in range(len(t)):
if t[h]==0:
t1=t1+1
elif t[h]==1:
t2 = t2 + 1
elif self.dataset == "tox21_NR-AR":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-AR_rotate")
elif self.dataset == "tox21_NR-AR-LBD":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-AR-LBD_rotate")
elif self.dataset == "tox21_NR-AhR":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-AhR_rotate")
elif self.dataset == "tox21_NR-Aromatase":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-Aromatase_rotate")
elif self.dataset == "tox21_NR-ER":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-ER_rotate")
elif self.dataset == "tox21_NR-ER-LBD":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-ER-LBD_rotate")
elif self.dataset == "tox21_NR-PPAR-gamma":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_NR-PPAR-gamma_rotate")
elif self.dataset == "tox21_SR-ARE":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_SR-ARE_rotate")
elif self.dataset == "tox21_SR-ATAD5":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_SR-ATAD5_rotate")
elif self.dataset == "tox21_SR-HSE":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_SR-HSE_rotate")
elif self.dataset == "tox21_SR-MMP":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_SR-MMP_rotate")
elif self.dataset == "tox21_SR-p53":
grid_x, grid_y, grid_smiles, sample_shape = read_griddata("gridMols/grid3Dmols_tox21_SR-p53_rotate")
self.smiles, self.gridx, self.gridy, self.gridshape = np.array(grid_smiles), np.array(grid_x), np.array( grid_y), sample_shape
if self.dataset == "hiv":
train_inds, valid_inds, test_inds = scaffoldsplit.ScaffoldSplitter().train_valid_test_split(self.gridx, self.gridy, self.smiles)
train_smiles = self.smiles[train_inds]
train_gridy = self.gridy[train_inds]
train_grid3d = self.gridx[train_inds]
np.random.seed(66)
index_train = np.random.permutation(len(train_smiles))
valid_smiles = self.smiles[valid_inds]
valid_gridy = self.gridy[valid_inds]
valid_grid3d = self.gridx[valid_inds]
index_valid = np.random.permutation(len(valid_smiles))
test_smiles = self.smiles[test_inds]
test_gridy = self.gridy[test_inds]
test_grid3d = self.gridx[test_inds]
index_test = np.random.permutation(len(test_smiles))
self.x = {"train": train_smiles[index_train],
"valid": valid_smiles[index_valid],
"test": test_smiles[index_test]}
self.y = {"train": train_gridy[index_train],
"valid": valid_gridy[index_valid],
"test": test_gridy[index_test]}
self.grid3d = {"train": train_grid3d[index_train],
"valid": valid_grid3d[index_valid],
"test": test_grid3d[index_test]}
else:
# Shuffle data
idx = np.random.permutation(len(self.smiles))
self.smiles, self.gridx, self.gridy = self.smiles[idx], self.gridx[idx], self.gridy[idx]
# Split data
spl1 = int(len(self.smiles) * 0.2)
spl2 = int(len(self.smiles) * 0.1)
self.x = {"train": self.smiles[spl1:],
"valid": self.smiles[spl2:spl1],
"test": self.smiles[:spl2]}
self.y = {"train": self.gridy[spl1:],
"valid": self.gridy[spl2:spl1],
"test": self.gridy[:spl2]}
self.grid3d = {"train": self.gridx[spl1:],
"valid":self.gridx[spl2:spl1],
"test":self.gridx[:spl2]}
print("aa")
def save_dataset(self, path, pred=None, target="test", filename=None):
mols = []
# for idx, (smile, y) in enumerate(zip(self.t_smiles[target], self.y[target])):
# smile.SetProp("true", str(y * self.std + self.mean))
# # smile.SetProp("smiles", self.smiles[idx])
# # smile.SetProp("name", self.x[target][idx])
# if pred is not None:
# smile.SetProp("pred", str(pred[idx][0] * self.std + self.mean))
# mols.append(smile)
#
# if filename is not None:
# w = Chem.SDWriter(path + filename + ".sdf")
# else:
# w = Chem.SDWriter(path + target + ".sdf")
# for mol in mols:
# if mol is not None:
# w.write(mol)
def replace_dataset(self, path, subset="test", target_name="target"):
x, c, y = [], [], []
mols = Chem.SDMolSupplier(path)
for mol in mols:
if mol is not None:
# Multitask
if type(target_name) is list:
y.append([float(mol.GetProp(t)) if t in mol.GetPropNames() else -1 for t in target_name])
self.outputs = len(self.target_name)
# Singletask
elif target_name in mol.GetPropNames():
_y = float(mol.GetProp(target_name))
if _y == -1:
continue
else:
y.append(_y)
else:
continue
x.append(mol)
c.append(mol.GetConformer().GetPositions())
# Normalize
        x = np.array(x)
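# --- Illustrative sketch (synthetic targets) ---
# For regression datasets the class standardizes targets with *train-set*
# statistics only, then applies the same mean/std to the valid and test splits:
import numpy as np

y = {"train": np.array([1., 2., 3.]), "valid": np.array([2.]), "test": np.array([4.])}
mean, std = np.mean(y["train"]), np.std(y["train"])
y = {k: (v - mean) / std for k, v in y.items()}
print(y["test"])    # standardized with train statistics, not its own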
import logging
import math
import numpy as np
from pyproj import CRS
import rasterio
logger = logging.getLogger(__name__)
KM = 1000.0
MILES = 1609.34
# TODO
# optional use of rasterio data ; extract proj / ellipsoid from it
# TODO handle nodata (0 dans le TIFF IGN) ;
# TIFF IGN EPSG:2154 - RGF93 / Lambert-93 - Projected
# TODO support negative elevation angle
def horizon(latlon, raster, distance=25 * KM, precision=1, height=0):
if raster.crs.is_projected:
raise Exception("Only geographic CRS are supported")
crs = CRS.from_wkt(raster.crs.to_wkt())
ellipsoid = crs.ellipsoid
logger.debug("Extracting data...")
study_area, study_transform = _extract_data(latlon, raster, distance, ellipsoid)
lats, lons = _pixel_positions(study_area, study_transform)
y_obs, x_obs = rasterio.transform.rowcol(study_transform, latlon[1], latlon[0])
logger.debug("Computing azimuths...")
# Azimuth of all pixel corners seen from observer point
# bottom right corners
lat_res, lon_res = _resolution(study_transform)
azimuth = _azimuth(latlon, lats - lat_res / 2, lons + lon_res / 2, ellipsoid)
# Corresponding elevation angle
# add observer height
z_obs = study_area[y_obs, x_obs] + height
lon_grid, lat_grid = np.meshgrid(lons, lats, sparse=False)
logger.debug("Computing elevations...")
elevation = _elevation_angle(
z_obs,
study_area,
latlon,
lat_grid,
lon_grid,
ellipsoid,
)
logger.debug(f"Computing sun mask... precision={precision}")
helevation = _compute_mask(x_obs, y_obs, study_area, precision, azimuth, elevation)
# Finalization
# Set horizon
# TODO negative values should be allowed (when located at peak of mountain)
helevation[helevation < 0] = 0
hzenith = 90 - helevation
# Store azimuth from 0 to 360 (for using with solar azimuth functions)
hazimuth = np.linspace(0, 360, len(helevation))
return helevation, hzenith, hazimuth
def _compute_mask(x_obs, y_obs, study_area, precision, azimuth, elevation):
# Elevation vector length
# degrees
length_elevation = precision * 90 + 1
height, width = study_area.shape
# Specific azimuth values for NE (-180 to -90) and NW (90 to 180) areas
# so bin edges are ordered (in the computation below)
azimuthNE = azimuth.copy()
azimuthNE[:y_obs, x_obs - 1] = azimuthNE[:y_obs, x_obs - 1] - 360
azimuthNW = azimuth.copy()
azimuthNW[:y_obs, x_obs] = azimuthNW[:y_obs, x_obs] + 360
# Initialization
north_distance = y_obs
east_distance = width - x_obs
south_distance = height - y_obs
west_distance = x_obs
# TODO initialize with -inf
# TODO process NO DATA
elevationNE = np.zeros((north_distance, length_elevation))
elevationE = np.zeros((east_distance, 2 * length_elevation - 1))
elevationS = np.zeros((south_distance, 2 * length_elevation - 1))
elevationW = np.zeros((west_distance, 2 * length_elevation - 1))
elevationNW = np.zeros((north_distance, length_elevation))
azNE = np.linspace(-180, -90, length_elevation)
azE = np.linspace(-180, 0, 2 * length_elevation - 1)
azS = np.linspace(-90, 90, 2 * length_elevation - 1)
azW = np.linspace(0, 180, 2 * length_elevation - 1)
azNW = np.linspace(90, 180, length_elevation)
# Main computation
# North divided into 2 sections : -180 to -90 in W ; 90 to 180 in E
# Retrieve all elevation angles for iso-azimuth lines (loxodromes / rhumb lines)
for isoline in range(north_distance):
k = np.digitize(azNE, azimuthNE[isoline, x_obs - 1 :])
valid_k = (k != 0) & (k != east_distance + 1)
elevationNE[isoline, valid_k] = elevation[isoline, x_obs - 1 + k[valid_k]]
k2 = np.digitize(azNW, azimuthNW[isoline, : x_obs + 1])
valid_k2 = (k2 != 0) & (k2 != (west_distance + 1))
elevationNW[isoline, valid_k2] = elevation[isoline, k2[valid_k2] - 1]
for isoline in range(east_distance):
k = np.digitize(azE, azimuth[:, x_obs + isoline])
valid_k = (k != 0) & (k != height)
elevationE[isoline, valid_k] = elevation[k[valid_k], x_obs + isoline]
for isoline in range(south_distance):
k = np.digitize(azS, azimuth[y_obs + isoline, ::-1])
valid_k = (k != 0) & (k != width)
elevationS[isoline, valid_k] = elevation[
y_obs + isoline, width - 1 - k[valid_k]
]
for isoline in range(west_distance):
k = np.digitize(azW, azimuth[::-1, isoline])
valid_k = (k != 0) & (k != height)
elevationW[isoline, valid_k] = elevation[height - 1 - k[valid_k], isoline]
# max for each angle (2nd dimension) for each sunmask
sun_maskNE = np.max(elevationNE, axis=0)
sun_maskE = np.max(elevationE, axis=0)
sun_maskS = np.max(elevationS, axis=0)
sun_maskW = np.max(elevationW, axis=0)
sun_maskNW = np.max(elevationNW, axis=0)
# Global azimuth (North to North) and sun mask (elevation angle)
azNtoN = np.concatenate([azNE, azE, azS, azW, azNW])
sun_mask = np.concatenate([sun_maskNE, sun_maskE, sun_maskS, sun_maskW, sun_maskNW])
total_length_elevation = precision * 360 + 1
helevation = np.zeros(total_length_elevation)
# Corresponding azimuth (from -180° to 180°)
az = np.linspace(-180, 180, total_length_elevation)
for i in range(len(az)):
# max of angle r across all the sunmasks
helevation[i] = np.max(sun_mask[azNtoN == az[i]])
return helevation
def _extract_data(latlon, raster, distance, ellipsoid):
lat, lon = latlon
lat_resolution, lon_resolution = _resolution(raster.transform)
row, col = raster.index(lon, lat)
if not 0 <= row < raster.height or not 0 <= col < raster.width:
# TODO specific exception
raise Exception("LatLon not covered by DEM")
# deg corresponding to distance
lat_distance, lon_distance = _dist2deg(latlon, distance, ellipsoid)
topo_lat_distance = round(lat_distance / lat_resolution)
topo_lon_distance = round(lon_distance / lon_resolution)
window_base = rasterio.windows.Window(0, 0, raster.width, raster.height)
window = rasterio.windows.Window(
# clamp to the raster area
col - topo_lon_distance,
row - topo_lat_distance,
topo_lon_distance * 2,
topo_lat_distance * 2,
)
window = window.intersection(window_base)
study_area = raster.read(1, window=window)
study_transform = raster.window_transform(window)
return study_area, study_transform
def _elevation_angle(
z_obs,
study_area,
latlon,
lat_grid,
lon_grid,
ellipsoid,
):
latlon = np.deg2rad(latlon)
lat_grid = np.deg2rad(lat_grid)
lon_grid = np.deg2rad(lon_grid)
# Compute cartesian coordinates of point A and B located at altitude
# z_obs and study_area from the ellipsoid surface (ellipsoidal heights)
x_A, y_A, z_A = _geographic2cartesian(*latlon, z_obs, ellipsoid)
x_B, y_B, z_B = _geographic2cartesian(lat_grid, lon_grid, study_area, ellipsoid)
# Scalar product between AB and normal to the point A
inner_product = (
(x_B - x_A) * np.cos(latlon[1]) * np.cos(latlon[0])
        + (y_B - y_A) * np.sin(latlon[1]) * np.cos(latlon[0])
        + (z_B - z_A) * np.sin(latlon[0])
    )
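# --- Illustrative sketch of the np.digitize binning used in _compute_mask ---
# Each isoline loop asks, for a grid of regular azimuths, which pixel-corner
# azimuth bin every query falls into; k == 0 or k == len(edges) marks
# out-of-range queries that must be skipped:
import numpy as np

corner_az = np.array([-180., -150., -120., -90.])   # monotone bin edges
query_az = np.linspace(-180, -90, 7)
k = np.digitize(query_az, corner_az)
valid = (k != 0) & (k != len(corner_az))
print(k, valid)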
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import time
from tqdm import tqdm
import datetime
from dataloader_pipeline import Dataloader
import os
import numpy as np
import re
from sklearn.metrics import roc_auc_score
from matplotlib import pyplot as plt
import csv
from matplotlib.gridspec import SubplotSpec
from PIL import Image, ImageFont, ImageDraw
class TestModel(object):
def __init__(self):
# Cublass error
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# tf.enable_eager_execution(True)
self.TOTAL_IMGS_TEST = 32196
self.IMG_WIDTH = 256
self.IMG_HEIGHT = 256
self.OUTPUT_CHANNELS = 3
self.PATH_dataset = 'dataset/BDDA/'
self.PATH_dataset_DADA = 'dataset/DADA2000/'
self.checkpoint_dir = './training_checkpoints'
print('Models available: {}'.format(
sorted(os.listdir(self.checkpoint_dir))))
self.name = input("Select model? ")
self.BATCH_SIZE = int(self.name.split('_')[-1])
self.dataloader = Dataloader(self.IMG_WIDTH,
self.IMG_HEIGHT,
self.OUTPUT_CHANNELS)
image_path_test = str(self.PATH_dataset +
'test/camera_images/all_images/*.jpg')
self.test_dataset = tf.data.Dataset.list_files(image_path_test)
self.test_dataset = self.test_dataset.map(
self.dataloader.load_image_test)
self.test_dataset = self.test_dataset.batch(self.BATCH_SIZE)
image_path_test_DADA = str(self.PATH_dataset_DADA +
'testing/camera_images/all_images/*.jpg')
self.test_dataset_DADA = tf.data.Dataset.list_files(image_path_test_DADA)
self.test_dataset_DADA = self.test_dataset_DADA.map(
self.dataloader.load_image_test)
self.test_dataset_DADA = self.test_dataset_DADA.batch(self.BATCH_SIZE)
self.display_images = False
self.log_dir="logs/"
self.auc = tf.keras.metrics.AUC(num_thresholds=200, curve='ROC')
self.kld = tf.keras.metrics.KLDivergence(
name='kullback_leibler_divergence', dtype=None)
# Mean metrics
self.mean_kld_bdda = 0
self.mean_cc_bdda = 0
self.mean_sAUC_bdda = 0
self.mean_kld_dada = 0
self.mean_cc_dada = 0
self.mean_sAUC_dada = 0
self.list_kld = []
self.list_cc = []
self.list_sAUC = []
self.best_kld = 1
self.best_cc = 0
def correlation_coefficient(self, patch1, patch2):
product = tf.math.reduce_mean(
(patch1 - tf.math.reduce_mean(patch1)) *
(patch2 - tf.math.reduce_mean(patch2)))
stds = tf.math.reduce_std(patch1) * tf.math.reduce_std(patch2)
if stds == 0.0:
return 0.0
else:
product /= stds
return product
def calculate_metrics(self, target, gen_output):
kld_metric = self.kld(target, gen_output)
mae_metric = tf.keras.metrics.mean_absolute_error(target, gen_output)
mse_metric = tf.keras.metrics.mean_squared_error(target, gen_output)
kld_metric = tf.reduce_mean(kld_metric)
mae_metric = tf.reduce_mean(mae_metric)
mse_metric = tf.reduce_mean(mse_metric)
correlation_coefficient = self.pearson_r(target, gen_output)
# correlation_coefficient = self.cross_entropy(target, gen_output)
auc = self.auc(target, gen_output)
# s_auc = np.std(auc)
# print(kld_metric.numpy())
self.list_kld.append(kld_metric.numpy())
self.list_cc.append(correlation_coefficient.numpy())
self.list_sAUC.append(auc.numpy())
return kld_metric, mae_metric, mse_metric, correlation_coefficient, auc
def pearson_r(self, y_true, y_pred):
x = y_true
y = y_pred
mx = tf.reduce_mean(x, axis=1, keepdims=True)
my = tf.reduce_mean(y, axis=1, keepdims=True)
xm, ym = x - mx, y - my
t1_norm = tf.nn.l2_normalize(xm, axis = 1)
t2_norm = tf.nn.l2_normalize(ym, axis = 1)
cosine = tf.compat.v1.losses.cosine_distance(t1_norm, t2_norm, axis = 1)
return cosine
# @tf.function
def test_step(self, input_image, target, step, dataset = 'bdda'):
gen_output = self.generator(input_image, training=False)
l1_metric = tf.reduce_mean(tf.abs(target - gen_output))
kld_metric, mae_metric, mse_metric, correlation_coefficient, auc = \
self.calculate_metrics(target, gen_output)
if self.display_images:
if kld_metric < self.best_kld:
self.best_kld = kld_metric
self.generate_images(input_image,
target,
gen_output,
step,
dataset,
'kld' ,
kld_metric)
if correlation_coefficient > self.best_cc:
self.best_cc = correlation_coefficient
self.generate_images(input_image,
target,
gen_output,
step,
dataset,
'cc' ,
correlation_coefficient)
tensorboard_step = step * self.BATCH_SIZE
with self.summary_writer.as_default():
tf.summary.scalar('testing_l1_metric_' + dataset,
l1_metric,
step = tensorboard_step)
tf.summary.scalar('testing_kld_metric_' + dataset,
kld_metric,
step = tensorboard_step)
tf.summary.scalar('testing_mae_metric_' + dataset,
mae_metric,
step = tensorboard_step)
tf.summary.scalar('testing_mse_metric_' + dataset,
mse_metric,
step = tensorboard_step)
tf.summary.scalar('testing_correlation_coefficient_' + dataset,
correlation_coefficient,
step = tensorboard_step)
tf.summary.scalar('testing_roc_auc_score_' + dataset,
auc,
step = tensorboard_step)
def create_subtitle(self, fig: plt.Figure, grid: SubplotSpec, title: str):
"Sign sets of subplots with title"
row = fig.add_subplot(grid)
# the '\n' is important
row.set_title(f'\n{title}\n', fontweight='semibold' , size=50)
# hide subplot
row.set_frame_on(False)
row.axis('off')
def tensor_to_image(self, tensor):
tensor = tensor*255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor)>3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return Image.fromarray(tensor)
def generate_images(self,
input_image,
target,
gen_output,
step,
dataset,
metric,
value):
image_comp = Image.new('RGB', (2048 + 120, 256*3), color=0)
image_text = Image.new('RGB', (120, 256*3), color=0)
d = ImageDraw.Draw(image_text)
font = ImageFont.truetype("times-ro.ttf", 25)
d.text((10, 128), " RGB", font=font, fill=(255, 255, 255, 255))
d.text((10, 100 + 256), " RGB \n\n GT",
font=font,
fill=(255, 255, 255, 255))
d.text((10, 100 + 512), " RGB \n\nPrediction",
font=font,
fill=(255, 255, 255, 255))
image_comp.paste(image_text, (0, 0))
for j in range(self.BATCH_SIZE):
image = self.tensor_to_image(input_image[j])
map = self.tensor_to_image(gen_output[j])
gt = self.tensor_to_image(target[j])
im = Image.blend(image, map, 0.7)
im_gt = Image.blend(image, gt, 0.7)
image_comp.paste(image, (256 * j + 120, 0))
image_comp.paste(im, (256 * j + 120, 512))
image_comp.paste(im_gt, (256 * j + 120, 256))
image_comp.save(
'img_bbda_inferencia/{}_at_dataset_{}_at step_{:04d}_metric_{}\
_value_{}.png'.format(self.name, dataset, step,metric, value))
def main(self):
for epoch in range(0,15):
self.summary_writer = tf.summary.create_file_writer(
self.log_dir + "test/" +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") +
"_" + self.name + "_" + str(self.BATCH_SIZE) +
"_epoch_" + str(epoch))
self.model_path = self.checkpoint_dir + '/' + self.name + '/epoch_' + str(epoch)
self.generator = tf.keras.models.load_model(self.model_path, custom_objects={'tf': tf})
# Check its architecture
# self.generator.summary()
for step, (input_image, target) in tqdm(self.test_dataset.enumerate()):
self.test_step(input_image, target, step, dataset = 'bdda')
self.mean_kld_bdda = np.mean(self.list_kld)
self.mean_cc_bdda = np.mean(self.list_cc)
self.mean_sAUC_bdda = np.mean(self.list_sAUC)
self.list_kld = []
self.list_cc = []
self.list_sAUC = []
self.best_kld = 1
self.best_cc = 0
for step, (input_image, target) in tqdm(self.test_dataset_DADA.enumerate()):
self.test_step(input_image, target, step, dataset = 'dada')
self.mean_kld_dada = np.mean(self.list_kld)
            self.mean_cc_dada = np.mean(self.list_cc)
            self.mean_sAUC_dada = np.mean(self.list_sAUC)
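# --- Illustrative sketch (toy vectors; not the TF graph above) ---
# pearson_r computes correlation by mean-centering both tensors and taking a
# cosine similarity/distance between them; the NumPy equivalent of the
# correlation coefficient itself is:
import numpy as np

a = np.array([1., 2., 3., 4.])
b = np.array([2., 4., 6., 8.])
am, bm = a - a.mean(), b - b.mean()
print(np.dot(am, bm) / (np.linalg.norm(am) * np.linalg.norm(bm)))   # 1.0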
# CREATED:2013-11-30 14:22:33 by <NAME> <<EMAIL>>
#
# Restricted FDA
# only compute between-class scatter within each song
import itertools
import numpy as np
import scipy.linalg
from sklearn.base import BaseEstimator, TransformerMixin
class RFDA(BaseEstimator, TransformerMixin):
def __init__(self, sigma=1e-4):
'''Ordinal linear discriminant analysis
Arguments:
----------
sigma : float
Regularization parameter
'''
self.sigma = sigma
self.scatter_restricted_ = None
self.scatter_within_ = None
def fit(self, X, Y):
'''Fit the RFDA model
Parameters
----------
X : array-like, shape [n_samples]
Training data: each example is an n_features-by-* data array
Y : array-like, shape [n_samples]
Training labels: each label is an array of change-points
(eg, a list of segment boundaries)
Returns
-------
self : object
'''
# Re-initialize the scatter matrices
self.scatter_restricted_ = None
self.scatter_within_ = None
# Reduce to partial-fit
self.partial_fit(X, Y)
return self
def partial_fit(self, X, Y):
'''Partial-fit the RFDA model
Parameters
----------
X : array-like, shape [n_samples]
Training data: each example is an n_features-by-* data array
Y : array-like, shape [n_samples]
Training labels: each label is an array of change-points
(eg, a list of segment boundaries)
Returns
-------
self : object
'''
for (xi, yi) in itertools.izip(X, Y):
if self.scatter_within_ is None:
# First round: initialize
d, n = xi.shape
if yi[0] > 0:
yi = np.concatenate([np.array([0]), yi])
if yi[-1] < n:
yi = np.concatenate([yi, np.array([n])])
self.scatter_within_ = self.sigma * np.eye(d)
self.scatter_restricted_ = np.zeros(d)
# compute the mean and cov of each segment
global_mean = np.mean(xi, axis=1, keepdims=True)
# iterate over segments
for (seg_start, seg_end) in zip(yi[:-1], yi[1:]):
seg_length = seg_end - seg_start
if seg_length < 2:
continue
                seg_mean = np.mean(xi[:, seg_start:seg_end], axis=1, keepdims=True)
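# --- Illustrative sketch (random toy segment; names invented) ---
# Per segment, RFDA works with scatter around the segment mean; one
# within-segment scatter contribution is a d-by-d matrix:
import numpy as np

xi = np.random.RandomState(0).randn(5, 10)   # d=5 features, n=10 frames
seg = xi[:, 2:7]                             # one segment of the song
mu = seg.mean(axis=1, keepdims=True)
print(((seg - mu) @ (seg - mu).T).shape)     # (5, 5) scatter matrix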
# Copyright (c) 2020-2021 The Center for Theoretical Biological Physics (CTBP) - Rice University
# This file is from the Open-MiChroM project, released under the MIT License.
R"""
The :class:`~.ChromDynamics` classes perform chromatin dynamics based on the compartment annotation sequence of chromosomes. The simulations can be performed either using the default parameters of MiChroM (Minimal Chromatin Model) or using custom values for the type-to-type and Ideal Chromosome parameters.
"""
from simtk.openmm.app import *
import simtk.openmm as openmm
import simtk.unit as units
from sys import stdout, argv
import numpy as np
from six import string_types
import os
import time
import random
import h5py
from scipy.spatial import distance
import scipy as sp
import itertools
from pandas import DataFrame
class MiChroM:
R"""
The :class:`~.MiChroM` class performs chromatin dynamics employing the default MiChroM energy function parameters for the type-to-type and Ideal Chromosome interactions.
    Details about the MiChroM (Minimal Chromatin Model) energy function and the default parameters are described in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173."
The :class:`~.MiChroM` sets the environment to start the chromatin dynamics simulations.
Args:
time_step (float, required):
Simulation time step in units of :math:`\tau`. (Default value = 0.01).
collision_rate (float, required):
Friction/Damping constant in units of reciprocal time (:math:`1/\tau`). (Default value = 0.1).
temperature (float, required):
Temperature in reduced units. (Default value = 1.0).
verbose (bool, optional):
Whether to output the information in the screen during the simulation. (Default value: :code:`False`).
velocity_reinitialize (bool, optional):
Reset/Reinitialize velocities if :math:`E_{kin}` is greater than 5.0. (Default value: :code:`True`).
name (str):
Name used in the output files. (Default value: *Chromosome*).
length_scale (float, required):
Length scale used in the distances of the system in units of reduced length :math:`\sigma`. (Default value = 1.0).
mass_scale (float, required):
Mass scale used in units of :math:`\mu`. (Default value = 1.0).
"""
def __init__(
self, time_step=0.01, collision_rate=0.1, temperature=1.0,
verbose=False,
velocity_reinitialize=True,
name="Chromosome",
length_scale=1.0,
mass_scale=1.0):
self.name = name
self.timestep = time_step
self.collisionRate = collision_rate
self.temperature = temperature * 120.0
self.verbose = verbose
self.velocityReinitialize = velocity_reinitialize
self.loaded = False
self.forcesApplied = False
self.folder = "."
self.metadata = {}
self.length_scale = length_scale
self.mass_scale = mass_scale
self.eKcritical = 50000000
self.nm = units.meter * 1e-9
self.Sigma = 1.0
self.Epsilon = 1.0
##################### A1 A2 B1 B2 B3 B4 NA
self.inter_Chrom_types =[-0.268028,-0.274604,-0.262513,-0.258880,-0.266760,-0.266760,-0.225646, #A1
-0.274604,-0.299261,-0.286952,-0.281154,-0.301320,-0.301320,-0.245080, #A2
-0.262513,-0.286952,-0.342020,-0.321726,-0.336630,-0.336630,-0.209919, #B1
-0.258880,-0.281154,-0.321726,-0.330443,-0.329350,-0.329350,-0.282536, #B2
-0.266760,-0.301320,-0.336630,-0.329350,-0.341230,-0.341230,-0.349490, #B3
-0.266760,-0.301320,-0.336630,-0.329350,-0.341230,-0.341230,-0.349490, #B4
-0.225646,-0.245080,-0.209919,-0.282536,-0.349490,-0.349490,-0.255994] #NA
def setup(self, platform="CUDA", PBC=False, PBCbox=None, GPU="default",
integrator="langevin", errorTol=None, precision="mixed",deviceIndex="0"):
R"""Sets up the parameters of the simulation OpenMM platform.
Args:
platform (str, optional):
Platform to use in the simulations. Opitions are *CUDA*, *OpenCL*, *HIP*, *CPU*, *Reference*. (Default value: *CUDA*).
PBC (bool, optional)
Whether to use periodic boundary conditions. (Default value: :code:`False`).
PBCbox ([float,float,float], optional):
Define size of the bounding box for PBC. (Default value: :code:`None`).
GPU ( :math:`0` or :math:`1`, optional):
Switch to another GPU. Machines with one GPU automatically select the right GPU. Machines with two or more GPUs select GPU that is less used.
integrator (str):
Integrator to use in the simulations. Options are *langevin*, *variableLangevin*, *verlet*, *variableVerlet* and, *brownian*. (Default value: *langevin*).
verbose (bool, optional):
Whether to output the information in the screen during the simulation. (Default value: :code:`False`).
deviceIndex (str, optional):
Set of Platform device index IDs. Ex: 0,1,2 for the system to use the devices 0, 1 and 2. (Use only when GPU != default)
errorTol (float, required if **integrator** = *variableLangevin*):
Error tolerance parameter for *variableLangevin* integrator.
"""
self.step = 0
if PBC == True:
self.metadata["PBC"] = True
precision = precision.lower()
if precision not in ["mixed", "single", "double"]:
raise ValueError("Presision must be mixed, single or double")
self.kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
self.kT = self.kB * self.temperature
self.mass = 10.0 * units.amu * self.mass_scale
self.bondsForException = []
self.mm = openmm
self.system = self.mm.System()
self.PBC = PBC
if self.PBC == True:
if PBCbox is None:
data = self.getPositions()
data -= np.min(data, axis=0)
datasize = 1.1 * (2 + (np.max(self.getPositions(), axis=0) - np.min(self.getPositions(), axis=0)))
self.SolventGridSize = (datasize / 1.1) - 2
print("density is ", self.N / (datasize[0]
* datasize[1] * datasize[2]))
else:
PBCbox = np.array(PBCbox)
datasize = PBCbox
self.metadata["PBCbox"] = PBCbox
self.system.setDefaultPeriodicBoxVectors([datasize[0], 0.,
0.], [0., datasize[1], 0.], [0., 0., datasize[2]])
self.BoxSizeReal = datasize
self.GPU = str(GPU)
properties = {}
if self.GPU.lower() != "default":
properties["DeviceIndex"] = deviceIndex
properties["Precision"] = precision
self.properties = properties
if platform.lower() == "opencl":
platformObject = self.mm.Platform.getPlatformByName('OpenCL')
elif platform.lower() == "reference":
platformObject = self.mm.Platform.getPlatformByName('Reference')
elif platform.lower() == "cuda":
platformObject = self.mm.Platform.getPlatformByName('CUDA')
elif platform.lower() == "cpu":
platformObject = self.mm.Platform.getPlatformByName('CPU')
elif platform.lower() == "hip":
platformObject = self.mm.Platform.getPlatformByName('HIP')
else:
self.exit("\n!!!! Unknown platform !!!!\n")
self.platform = platformObject
self.forceDict = {}
self.integrator_type = integrator
if isinstance(integrator, string_types):
integrator = str(integrator)
if integrator.lower() == "langevin":
self.integrator = self.mm.LangevinIntegrator(self.temperature,
self.collisionRate, self.timestep)
elif integrator.lower() == "variablelangevin":
self.integrator = self.mm.VariableLangevinIntegrator(self.temperature,
self.collisionRate, errorTol)
elif integrator.lower() == "verlet":
self.integrator = self.mm.VariableVerletIntegrator(self.timestep)
elif integrator.lower() == "variableverlet":
self.integrator = self.mm.VariableVerletIntegrator(errorTol)
elif integrator.lower() == 'brownian':
self.integrator = self.mm.BrownianIntegrator(self.temperature,
self.collisionRate, self.timestep)
else:
print ('please select from "langevin", "variablelangevin", '
'"verlet", "variableVerlet", '
'"brownian" or provide an integrator object')
else:
self.integrator = integrator
self.integrator_type = "UserDefined"
def saveFolder(self, folder):
R"""Sets the folder path to save data.
Args:
folder (str, optional):
Folder path to save the simulation data. If the folder path does not exist, the function will create the directory.
"""
if os.path.exists(folder) == False:
os.mkdir(folder)
self.folder = folder
def loadStructure(self, filename,center=True,masses=None):
R"""Loads the 3D position of each bead of the chromosome polymer in the OpenMM system platform.
Args:
center (bool, optional):
Whether to move the center of mass of the chromosome to the 3D position ``[0, 0, 0]`` before starting the simulation. (Default value: :code:`True`).
masses (array, optional):
Masses of each chromosome bead measured in units of :math:`\mu`. (Default value: :code:`None`).
"""
data = filename
data = np.asarray(data, float)
if len(data) == 3:
data = np.transpose(data)
if len(data[0]) != 3:
self._exitProgram("Wrong file format")
if np.isnan(data).any():
self._exitProgram("\n!!!! The file contains NAN's !!!!\n")
if center is True:
av = np.mean(data, 0)
data -= av
if center == "zero":
minvalue = np.min(data, 0)
data -= minvalue
self.setPositions(data)
if masses == None:
self.masses = [1. for _ in range(self.N)]
else:
self.masses = masses
if not hasattr(self, "chains"):
self.setChains()
def setChains(self, chains=[(0, None, 0)]):
R"""Sets configuration of the chains in the system. This information is later used for adding Bonds and Angles of the Homopolymer potential.
Args:
chains (list of tuples, optional):
The list of chains in the format [(start, end, isRing)]. isRing is a boolean whether the chromosome chain is circular or not (Used to simulate bacteria genome, for example). The particle range should be semi-open, i.e., a chain :math:`(0,3,0)` links the particles :math:`0`, :math:`1`, and :math:`2`. If :code:`bool(isRing)` is :code:`True` , the first and last particles of the chain are linked, forming a ring. The default value links all particles of the system into one chain. (Default value: :code:`[(0, None, 0)]`).
"""
self.chains = [i for i in chains]
for i in range(len(self.chains)):
start, end, isRing = self.chains[i]
self.chains[i] = (start, end, isRing)
def setPositions(self, beadsPos , random_offset = 1e-5):
R"""Sets the 3D position of each bead of the chromosome polymer in the OpenMM system platform.
Args:
beadsPos (:math:`(N, 3)` :class:`numpy.ndarray`):
Array of XYZ positions for each bead (locus) in the polymer model.
random_offset (float, optional):
A small increment in the positions to avoid numeral instability and guarantee that a *float* parameter will be used. (Default value = 1e-5).
"""
data = np.asarray(beadsPos, dtype="float")
if random_offset:
data = data + (np.random.random(data.shape) * 2 - 1) * random_offset
self.data = units.Quantity(data, self.nm)
self.N = len(self.data)
if hasattr(self, "context"):
self.initPositions()
def getPositions(self):
R"""
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
return np.asarray(self.data / self.nm, dtype=np.float32)
def randomizePositions(self):
R"""
Runs automatically to offset the positions if it is an integer (int) variable.
"""
data = self.getPositions()
data = data + np.random.randn(*data.shape) * 0.0001
self.setPositions(data)
def getLoops(self, looplists):
R"""
Get the loop position (CTFC anchor points) for each chromosome.
.. note:: For Multi-chain simulations, the ordering of the loop list files is important! The order of the files should be the same as used in the other functions.
Args:
looplists (text file):
A two-column text file containing the index *i* and *j* of a loci pair that form loop interactions.
"""
self.loopPosition = []
for file, chain in zip(looplists,self.chains):
aFile = open(file,'r')
pos = aFile.read().splitlines()
m = int(chain[0])
for t in range(len(pos)):
pos[t] = pos[t].split()
pos[t][0] = int(pos[t][0]) +m
pos[t][1] = int(pos[t][1]) +m
self.loopPosition.append(pos[t])
def addFlatBottomHarmonic(self, kr=5*10**-3, n_rad=10.0):
R"""
Sets a Flat-Bottom Harmonic potential to collapse the chromosome chain inside the nucleus wall. The potential is defined as: :math:`step(r-r0) * (kr/2)*(r-r0)^2`.
Args:
kr (float, required):
Spring constant. (Default value = 5e-3).
n_rad (float, required):
Nucleus wall radius in units of :math:`\sigma`. (Default value = 10.0).
"""
restraintForce = self.mm.CustomExternalForce("step(r-r_res) * 0.5 * kr * (r-r_res)^2; r=sqrt(x*x+y*y+z*z)")
restraintForce.addGlobalParameter('r_res', n_rad)
restraintForce.addGlobalParameter('kr', kr)
for i in range(self.N):
restraintForce.addParticle(i, [])
self.forceDict["FlatBottomHarmonic"] = restraintForce
def addSphericalConfinementLJ(self, r="density", density=0.1):
R"""
Sets the nucleus wall potential according to MiChroM Energy function. The confinement potential describes the interaction between the chromosome and a spherical wall.
Args:
r (float or str="density", optional):
Radius of the nucleus wall. If **r="density"** requires a **density** value.
density (float, required if **r="density"**):
Density of the chromosome beads inside the nucleus. (Default value = 0.1).
"""
spherForce = self.mm.CustomExternalForce("(4 * GROSe * ((GROSs/r)^12 - (GROSs/r)^6) + GROSe) * step(GROScut - r);"
"r= R - sqrt(x^2 + y^2 + z^2) ")
self.forceDict["SphericalConfinementLJ"] = spherForce
for i in range(self.N):
spherForce.addParticle(i, [])
if r == "density":
r = (3 * self.N / (4 * 3.141592 * density)) ** (1 / 3.)
self.sphericalConfinementRadius = r
spherForce.addGlobalParameter('R', r)
spherForce.addGlobalParameter('GROSe', 1.0)
spherForce.addGlobalParameter('GROSs', 1.0)
spherForce.addGlobalParameter("GROScut", 2.**(1./6.))
return r
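# --- Illustrative sketch (hypothetical) ---
# When r="density", the wall radius solves density = N / ((4/3) * pi * r^3),
# i.e. r = (3 N / (4 pi density))^(1/3), exactly as computed above.
def _radius_from_density_sketch(N, density=0.1):
    return (3.0 * N / (4.0 * 3.141592 * density)) ** (1.0 / 3.0)
# _radius_from_density_sketch(1000) -> ~13.4 (in units of sigma)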
def addFENEBonds(self, kfb=30.0):
R"""
Adds FENE (Finite Extensible Nonlinear Elastic) bonds between neighboring loci :math:`i` and :math:`i+1` according to "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2011. Molecular dynamics simulation study of nonconcatenated ring polymers in a melt. I. Statics. The Journal of chemical physics, 134(20), p.204904".
Args:
kfb (float, required):
Bond coefficient. (Default value = 30.0).
"""
for start, end, isRing in self.chains:
for j in range(start, end):
self.addBond(j, j + 1, kfb=kfb)
self.bondsForException.append((j, j + 1))
if isRing:
self.addBond(start, end, distance=1, kfb=kfb)
self.bondsForException.append((start, end ))
self.metadata["FENEBond"] = repr({"kfb": kfb})
def _initFENEBond(self, kfb=30):
R"""
Internal function that inits FENE bond force.
"""
if "FENEBond" not in list(self.forceDict.keys()):
force = ("- 0.5 * kfb * r0 * r0 * log(1-(r/r0)*(r/r0)) + (4 * e * ((s/r)^12 - (s/r)^6) + e) * step(cut - r)")
bondforceGr = self.mm.CustomBondForce(force)
bondforceGr.addGlobalParameter("kfb", kfb)
bondforceGr.addGlobalParameter("r0", 1.5)
bondforceGr.addGlobalParameter('e', 1.0)
bondforceGr.addGlobalParameter('s', 1.0)
bondforceGr.addGlobalParameter("cut", 2.**(1./6.))
self.forceDict["FENEBond"] = bondforceGr
def addBond(self, i, j, distance=None, kfb=30):
R"""
Adds bonds between loci :math:`i` and :math:`j`
Args:
kfb (float, required):
Bond coefficient. (Default value = 30.0).
i (int, required):
Locus index **i**.
j (int, required):
Locus index **j**
"""
if (i >= self.N) or (j >= self.N):
raise ValueError("\n Cannot add a bond between beads %d,%d that are beyond the chromosome length %d" % (i, j, self.N))
if distance is None:
distance = self.length_scale
else:
distance = self.length_scale * distance
distance = float(distance)
self._initFENEBond(kfb=kfb)
self.forceDict["FENEBond"].addBond(int(i), int(j), [])
def addAngles(self, ka=2.0):
R"""
Adds an angular potential between bonds connecting beads :math:`i − 1, i` and :math:`i, i + 1` according to "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2011. Molecular dynamics simulation study of nonconcatenated ring polymers in a melt. I. Statics. The Journal of chemical physics, 134(20), p.204904".
Args:
ka (float, required):
Angle potential coefficient. (Default value = 2.0).
"""
try:
ka[0]
except TypeError:
# ka is a scalar: broadcast the same stiffness to every bead
ka = np.zeros(self.N, float) + ka
angles = self.mm.CustomAngleForce(
"ka * (1 - cos(theta - 3.141592))")
angles.addPerAngleParameter("ka")
for start, end, isRing in self.chains:
for j in range(start + 1, end):
angles.addAngle(j - 1, j, j + 1, [ka[j]])
if isRing:
angles.addAngle(end - 1, end , start, [ka[end]])
angles.addAngle(end , start, start + 1, [ka[start]])
self.metadata["AngleForce"] = repr({"stiffness": ka})
self.forceDict["AngleForce"] = angles
def addRepulsiveSoftCore(self, Ecut=4.0):
R"""
Adds a soft-core repulsive interaction that allows chain crossing, which represents the activity of topoisomerase II. Details can be found in the following publications:
- <NAME>., A.B., <NAME>., <NAME>. and <NAME>., 2021. A scalable computational approach for simulating complexes of multiple chromosomes. Journal of Molecular Biology, 433(6), p.166700.
- <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173.
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2013. Organization of the mitotic chromosome. Science, 342(6161), pp.948-953.
Args:
Ecut (float, required):
Energy cost for the chain passing in units of :math:`k_{b}T`. (Default value = 4.0).
"""
nbCutOffDist = self.Sigma * 2. ** (1. / 6.) #1.112
Ecut = Ecut*self.Epsilon
r_0 = self.Sigma*(((0.5*Ecut)/(4.0*self.Epsilon) - 0.25 +((0.5)**(2.0)))**(1.0/2.0) +0.5)**(-1.0/6.0)
repul_energy = ("LJ * step(r - r_0) * step(CutOff - r)"
" + step(r_0 - r)* 0.5 * Ecut * (1.0 + tanh( (2.0 * LJ/Ecut) - 1.0 ));"
"LJ = 4.0 * Epsi * ((Sig/r)^12 - (Sig/r)^6) + Epsi")
self.forceDict["RepulsiveSoftCore"] = self.mm.CustomNonbondedForce(
repul_energy)
repulforceGr = self.forceDict["RepulsiveSoftCore"]
repulforceGr.addGlobalParameter('Epsi', self.Epsilon)
repulforceGr.addGlobalParameter('Sig', self.Sigma)
repulforceGr.addGlobalParameter('Ecut', Ecut)
repulforceGr.addGlobalParameter('r_0', r_0)
repulforceGr.addGlobalParameter('CutOff', nbCutOffDist)
repulforceGr.setCutoffDistance(3.0)
for _ in range(self.N):
repulforceGr.addParticle(())
def addTypetoType(self, mu=3.22, rc = 1.78 ):
R"""
Adds the type-to-type interactions according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
"""
self.metadata["TypetoType"] = repr({"mu": mu})
if not hasattr(self, "type_list"):
self.type_list = self.random_ChromSeq(self.N)
energy = "mapType(t1,t2)*0.5*(1. + tanh(mu*(rc - r)))*step(r-1.0)"
crossLP = self.mm.CustomNonbondedForce(energy)
crossLP.addGlobalParameter('mu', mu)
crossLP.addGlobalParameter('rc', rc)
crossLP.setCutoffDistance(3.0)
fTypes = self.mm.Discrete2DFunction(7,7,self.inter_Chrom_types)
crossLP.addTabulatedFunction('mapType', fTypes)
crossLP.addPerParticleParameter("t")
for i in range(self.N):
value = [float(self.type_list[i])]
crossLP.addParticle(value)
self.forceDict["TypetoType"] = crossLP
def addCustomTypes(self, mu=3.22, rc = 1.78, TypesTable=None):
R"""
Adds the type-to-type potential using custom values for interactions between the chromatin types. The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
The function receives a txt/TSV/CSV file containing the upper triangular matrix of the type-to-type interactions. A file example can be found `here <https://www.ndb.rice.edu>`__.
+---+------+-------+-------+
| | A | B | C |
+---+------+-------+-------+
| A | -0.2 | -0.25 | -0.15 |
+---+------+-------+-------+
| B | | -0.3 | -0.15 |
+---+------+-------+-------+
| C | | | -0.35 |
+---+------+-------+-------+
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
TypesTable (file, required):
A txt/TSV/CSV file containing the upper triangular matrix of the type-to-type interactions. (Default value: :code:`None`).
"""
self.metadata["CrossLink"] = repr({"mu": mu})
if not hasattr(self, "type_list"):
self.type_list = self.random_ChromSeq(self.N)
energy = "mapType(t1,t2)*0.5*(1. + tanh(mu*(rc - r)))*step(r-lim)"
crossLP = self.mm.CustomNonbondedForce(energy)
crossLP.addGlobalParameter('mu', mu)
crossLP.addGlobalParameter('rc', rc)
crossLP.addGlobalParameter('lim', 1.0)
crossLP.setCutoffDistance(3.0)
lambdas_full = np.loadtxt(TypesTable, delimiter=',')
lambdas = np.triu(lambdas_full) + np.triu(lambdas_full, k=1).T
diff_types = len(lambdas)
print("Number of different chromatin types:", diff_types)
lambdas = list(np.ravel(lambdas))
fTypes = self.mm.Discrete2DFunction(diff_types,diff_types,lambdas)
crossLP.addTabulatedFunction('mapType', fTypes)
AB_types = self.changeType_list()
crossLP.addPerParticleParameter("t")
for i in range(self.N):
value = [float(AB_types[i])]
crossLP.addParticle(value)
self.forceDict["CustomTypes"] = crossLP
def changeType_list(self):
R"""
Internal function for indexing unique chromatin types.
"""
n = set(self.type_list)
lista = np.array(self.type_list)
k=0
for t in n:
lista[lista==t] = k
k += 1
return(list(lista))
def addLoops(self, mu=3.22, rc = 1.78, X=-1.612990, looplists=None):
R"""
Adds the Loops interactions according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
.. note:: For Multi-chain simulations, the ordering of the loop list files is important! The order of the files should be the same as used in the other functions.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
X (float, required):
Loop interaction parameter. (Default value = -1.612990).
looplists (file, optional):
A two-column text file containing the indices *i* and *j* of a pair of loci that form a loop interaction. (Default value: :code:`None`).
"""
ELoop = "qsi*0.5*(1. + tanh(mu*(rc - r)))"
Loop = self.mm.CustomBondForce(ELoop)
Loop.addGlobalParameter('mu', mu)
Loop.addGlobalParameter('rc', rc)
Loop.addGlobalParameter('qsi', X)
self.getLoops(looplists)
for p in self.loopPosition:
Loop.addBond(p[0]-1,p[1]-1)
self.forceDict["Loops"] = Loop
def addCustomIC(self, mu=3.22, rc = 1.78, dinit=3, dend=200, IClist=None):
R"""
Adds the Ideal Chromosome potential using custom values for interactions between beads separated by a genomic distance :math:`d`. The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 200).
IClist (file, optional):
A one-column text file containing the energy interaction values for loci *i* and *j* separated by a genomic distance :math:`d`. (Default value: :code:`None`).
"""
energyIC = ("step(d-dinit)*IClist(d)*step(dend -d)*f*step(r-lim);"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx2-idx1)")
IC = self.mm.CustomNonbondedForce(energyIC)
IClist = np.append(np.zeros(dinit),IClist)[:-dinit]  # shift the table so entry d maps to genomic distance d (the first provided value corresponds to d = dinit); the original padded with dend zeros, which would zero out the whole active range
tabIClist = self.mm.Discrete1DFunction(IClist)
IC.addTabulatedFunction('IClist', tabIClist)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.addGlobalParameter('lim', 1.0)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["CustomIC"] = IC
def addIdealChromosome(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500):
R"""
Adds the Ideal Chromosome potential for interactions between beads separated by a genomic distance :math:`d` according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend -d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome"] = IC
def addMultiChainIC(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500, chains=None):
R"""
Adds the Ideal Chromosome potential for multiple chromosome simulations. The interactions between beads separated by a genomic distance :math:`d` is applied according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
chains (list of tuples, optional):
The list of chains in the format [(start, end, isRing)]. isRing is a boolean indicating whether the chromosome chain is circular (used to simulate bacterial genomes, for example). The particle range should be semi-open, i.e., a chain :math:`(0,3,0)` links the particles :math:`0`, :math:`1`, and :math:`2`. If :code:`bool(isRing)` is :code:`True`, the first and last particles of the chain are linked, forming a ring. The default value links all particles of the system into one chain. (Default value: :code:`[(0, None, 0)]`).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend-d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3)
groupList = list(range(chains[0],chains[1]+1))  # here 'chains' is a single (start, end, isRing) tuple for the chain this force acts on
IC.addInteractionGroup(groupList,groupList)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome_chain_"+str(chains[0])] = IC
def _loadParticles(self):
R"""
Internal function that loads the chromosome beads into the simulations system.
"""
if not hasattr(self, "system"):
return
if not self.loaded:
for mass in self.masses:
self.system.addParticle(self.mass * mass)
if self.verbose:
print("%d particles loaded" % self.N)
self.loaded = True
def _applyForces(self):
R"""Internal function that adds all loci to the system and applies all the forces present in the forcedict."""
if self.forcesApplied:
return
self._loadParticles()
exc = self.bondsForException
print("Number of exceptions:", len(exc))
if len(exc) > 0:
exc = np.array(exc)
exc = np.sort(exc, axis=1)
exc = [tuple(i) for i in exc]
exc = list(set(exc))
for i in list(self.forceDict.keys()):
force = self.forceDict[i]
if hasattr(force, "addException"):
print('Add exceptions for {0} force'.format(i))
for pair in exc:
force.addException(int(pair[0]),
int(pair[1]), 0, 0, 0, True)
elif hasattr(force, "addExclusion"):
print('Add exclusions for {0} force'.format(i))
for pair in exc:
force.addExclusion(int(pair[0]), int(pair[1]))
if hasattr(force, "CutoffNonPeriodic") and hasattr(
force, "CutoffPeriodic"):
if self.PBC:
force.setNonbondedMethod(force.CutoffPeriodic)
print("Using periodic boundary conditions!!!!")
else:
force.setNonbondedMethod(force.CutoffNonPeriodic)
print("adding force ", i, self.system.addForce(self.forceDict[i]))
for i,name in enumerate(self.forceDict):
self.forceDict[name].setForceGroup(i)
self.context = self.mm.Context(self.system, self.integrator, self.platform, self.properties)
self.initPositions()
self.initVelocities()
self.forcesApplied = True
with open(self.folder+'/platform_info.dat', 'w') as f:
print('Name: ', self.platform.getName(), file=f)
print('Speed: ',self.platform.getSpeed(), file=f)
print('Property names: ',self.platform.getPropertyNames(), file=f)
for name in self.platform.getPropertyNames():
print(name,' value: ',self.platform.getPropertyValue(self.context, name), file=f)
def createRandomWalk(self, step_size=1.0, Nbeads=1000, segment_length=1):
R"""
Creates a chromosome polymer chain with beads position based on a random walk.
Args:
step_size (float, required):
The step size of the random walk. (Default value = 1.0).
Nbeads (int, required):
Number of beads of the chromosome polymer chain. (Default value = 1000).
segment_length (int, required):
Number of consecutive beads that share the same random-walk direction. (Default value = 1).
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
theta = np.repeat(np.random.uniform(0., 1., Nbeads // segment_length + 1),
segment_length)
theta = 2.0 * np.pi * theta[:Nbeads]
u = np.repeat(np.random.uniform(0., 1., Nbeads // segment_length + 1),
segment_length)
u = 2.0 * u[:Nbeads] - 1.0
x = step_size * np.sqrt(1. - u * u) * np.cos(theta)
y = step_size * np.sqrt(1. - u * u) * np.sin(theta)
z = step_size * u
x, y, z = np.cumsum(x), np.cumsum(y), np.cumsum(z)
return np.vstack([x, y, z]).T
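# --- Illustrative sketch (hypothetical) ---
# Consecutive beads returned by createRandomWalk are separated by exactly
# step_size; this helper verifies that property for any returned array.
def _rw_step_check_sketch(pos, step_size=1.0):
    d = np.linalg.norm(np.diff(pos, axis=0), axis=1)
    return np.allclose(d, step_size)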
def loadNDB(self, NDBfiles=None):
R"""
Loads a single or multiple *.ndb* files and gets position and types of the chromosome beads.
Details about the NDB file format can be found at the `Nucleome Data Bank <https://ndb.rice.edu/ndb-format>`__.
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2021. The Nucleome Data Bank: web-based resources to simulate and analyze the three-dimensional genome. Nucleic Acids Research, 49(D1), pp.D172-D182.
Args:
NDBfiles (file, required):
Single or multiple files in *.ndb* file format. (Default value: :code:`None`).
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
Type_conversion = {'A1':0, 'A2':1, 'B1':2, 'B2':3,'B3':4,'B4':5, 'UN' :6}
x = []
y = []
z = []
index = []
start = 0
chains = []
sizeChain = 0
for ndb in NDBfiles:
aFile = open(ndb,'r')
lines = aFile.read().splitlines()
for line in lines:
line = line.split()
if line[0] == 'CHROM':
x.append(float(line[5]))
y.append(float(line[6]))
z.append(float(line[7]))
index.append(Type_conversion[line[2]])
sizeChain += 1
elif line[0] == "TER" or line[0] == "END":
break
chains.append((start, sizeChain-1, 0))
start = sizeChain
print("Chains: ", chains)
self.type_list = index
self.index = list(range(len(self.type_list)))
self.setChains(chains)
return np.vstack([x,y,z]).T
def loadGRO(self, GROfiles=None):
R"""
Loads a single or multiple *.gro* files and gets position and types of the chromosome beads.
Initially, the MiChroM energy function was implemented in GROMACS. Details on how to run and use these files can be found at the `Nucleome Data Bank <https://ndb.rice.edu/GromacsInput-Documentation>`__.
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2021. The Nucleome Data Bank: web-based resources to simulate and analyze the three-dimensional genome. Nucleic Acids Research, 49(D1), pp.D172-D182.
Args:
GROfiles (file, required):
Single or multiple files in *.gro* file format. (Default value: :code:`None`).
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
Type_conversion = {'ZA':0, 'OA':1, 'FB':2, 'SB':3,'TB':4, 'LB' :5, 'UN' :6}
x = []
y = []
z = []
index = []
start = 0
chains = []
sizeChain = 0
for gro in GROfiles:
aFile = open(gro,'r')
pos = aFile.read().splitlines()
size = int(pos[1])
#print(size)
for t in range(2, len(pos)-1):
pos[t] = pos[t].split()
x.append(float(pos[t][3]))
y.append(float(pos[t][4]))
z.append(float(pos[t][5]))
index.append(Type_conversion[pos[t][1]])
sizeChain += 1
chains.append((start, sizeChain-1, 0))
start = sizeChain
print("Chains: ", chains)
self.type_list = index
self.index = list(range(len(self.type_list)))
self.setChains(chains)
return np.vstack([x,y,z]).T
def loadPDB(self, PDBfiles=None):
R"""
Loads a single or multiple *.pdb* files and gets position and types of the chromosome beads.
Here the chromosome beads are treated as the alpha-carbons of a mimicked protein. This trick allows the use of standard macromolecule visualization software.
The type-to-residue conversion follows: {'ALA':0, 'ARG':1, 'ASP':2, 'GLU':3,'GLY':4, 'LEU' :5, 'ASN' :6}.
Args:
PDBfiles (file, required):
Single or multiple files in *.pdb* file format. (Default value: :code:`None`).
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
Type_conversion = {'ALA':0, 'ARG':1, 'ASP':2, 'GLU':3,'GLY':4, 'LEU' :5, 'ASN' :6}
x = []
y = []
z = []
index = []
start = 0
chains = []
sizeChain = 0
for pdb in PDBfiles:
aFile = open(pdb,'r')
pos = aFile.read().splitlines()
for t in range(len(pos)):
pos[t] = pos[t].split()
if pos[t][0] == 'ATOM':
x.append(float(pos[t][5]))
y.append(float(pos[t][6]))
z.append(float(pos[t][7]))
index.append(Type_conversion[pos[t][3]])
sizeChain += 1
chains.append((start, sizeChain, 0))
start = sizeChain
print("chain: ", chains)
self.type_list = index
self.index = list(range(len(self.type_list)))
self.setChains(chains)
return np.vstack([x,y,z]).T
def create_springSpiral(self,Nbeads=1000, ChromSeq=None, isRing=False):
R"""
Creates a spring-spiral-like shape for the initial configuration of the chromosome polymer.
Args:
Nbeads (int, required):
Number of beads of the chromosome polymer chain. (Default value = 1000).
ChromSeq (file, required):
Chromatin sequence of types file. The first column should contain the locus index. The second column should have the locus type annotation. A template of the chromatin sequence of types file can be found at the `Nucleome Data Bank (NDB) <https://ndb.rice.edu/static/text/chr10_beads.txt>`__.
isRing (bool, optional):
Whether the chromosome chain is circular or not (Used to simulate bacteria genome, for example). If :code:`bool(isRing)` is :code:`True`, the first and last particles of the chain are linked, forming a ring. (Default value = :code:`False`).
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
type_list=ChromSeq
x = []
y = []
z = []
if not hasattr(self, "type_list"):
self.type_list = []
if type_list is None:
beads = Nbeads
self.type_list = self.random_type(beads)
else:
self._translate_type(type_list)
beads = len(self.type_list)
self.index = list(range(beads))
for i in range(beads):
if (isRing):
a = 2.0*((beads-1)/beads)*np.pi*(i-1)/(beads-1)
a1 = 2.0*((beads-1)/beads)*np.pi*(2-1)/(beads-1)
else:
a = 1.7*np.pi*(i-1)/(beads-1)
a1 = 1.7*np.pi*(2-1)/(beads-1)
b=1/np.sqrt((4-3.0*np.cos(a1)-np.cos(10*a1)*np.cos(a1))**2 +
(0-3.0*np.sin(a1)-np.cos(10*a1)*np.sin(a1))**2+(np.sin(10*a1))**2)
x.append(1.5*np.pi*b+3*b* | np.cos(a) | numpy.cos |
# pip install opencv-python
# pip install opencv-contrib-python
# pip install pillow
import cv2
import requests
import numpy as np
from PIL import Image, ImageTk
class ImageColorTracker:
def __init__(self, file):
self.image_orig = cv2.imread(file)
self.image = cv2.cvtColor(self.image_orig, cv2.COLOR_BGR2RGB)
self.img = cv2.resize(self.image, (450,400))
self.hsv = cv2.cvtColor(self.img, cv2.COLOR_RGB2HSV)  # self.img is RGB after the conversion above, so use RGB2HSV (the original BGR2HSV flag was a bug)
def display_original_image(self, size):
tkimg = cv2.resize(self.image, size)
tkimg = Image.fromarray(tkimg)
tkimg = ImageTk.PhotoImage(tkimg)
return tkimg
def detect_from_image(self, arr1, arr2):
lower = np.array(arr1)
upper = | np.array(arr2) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 10:26:50 2019
@author: joaor
"""
import numpy as np
import pandas as pd
n_instances = 400
n_time_points = 5
def generate_binomial_1(n_instances,n_time_points):
n_features=2
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_1_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_1_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_binomial_2(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.1, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.9, 1)
if data[i,2] == 0 and data[i,3] == 1:
data[i,4] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 1 and data[i,3] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+7] == 0 and data[i,t*n_features+8] == 1:
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 1 and data[i,t*n_features+8] == 0:
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+9] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,4] = np.random.binomial(1, 0.1, 1)
if data[i,2] == 1 and data[i,4] == 0:
data[i,3] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 0 and data[i,4] == 1:
data[i,3] = np.random.binomial(1, 0.9, 1)
else:
data[i,3] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+7] == 1 and data[i,t*n_features+9] == 0:
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 0 and data[i,t*n_features+9] == 1:
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+8] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
for t in range(n_time_points):
df.drop(columns=["X0__"+str(t)], inplace=True)
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_binomial_3(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.7, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.3, 1)
if data[i,1] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.1, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,4] = np.random.binomial(1, 0.7, 1)
else:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,4] = np.random.binomial(1, 0.3, 1)
if data[i,1] == 0:
data[i,3] = np.random.binomial(1, 0.1, 1)
else:
data[i,3] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+1] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.6, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.4, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
for t in range(n_time_points):
df.drop(columns=["X0__"+str(t)], inplace=True)
df.drop(columns=["X1__"+str(t)], inplace=True)
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_3_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_3_'+str(n_time_points)+'_target.csv',quoting=1)
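# --- Usage sketch (assumption: the generators are invoked at module level
# with the globals defined at the top of this script) ---
# for gen in (generate_binomial_1, generate_binomial_2, generate_binomial_3):
#     gen(n_instances, n_time_points)  # writes *_parsed.csv and *_target.csv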
def generate_multinomial_1(n_instances,n_time_points):
n_features=3
values=np.arange(3)
data = np.zeros([n_instances, n_features*n_time_points])
uniform=np.ones(len(values))/len(values)
data[:,0] = np.random.choice(values,p=uniform, size=n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 2:
data[i,1] = np.random.choice(values,p=[0.9,0.05,0.05])
elif data[i,0] == 0:
data[i,1] = np.random.choice(values,p=[0.05,0.05,0.9])
else:
data[i,1] = np.random.choice(values,p=[0.05,0.9,0.05])
if data[i,0] == 2:
data[i,2] = np.random.choice(values,p=uniform)
elif data[i,0] == 0:
data[i,2] = np.random.choice(values,p=uniform)
else:
data[i,2] = np.random.choice(values,p=uniform)
#THIS FOR TIME SLICE
for t in range(n_time_points-1):
if data[i,t*n_features] == 2 and data[i,t*n_features+1] == 0:
data[i,t*n_features+3] = | np.random.choice(values,p=[0.9,0.05,0.05]) | numpy.random.choice |
import numpy as np
from scipy.sparse import csr_matrix
import myLOBPCG_new
###########################################################################
#
# Parameters and file path
#
###########################################################################
TJfile='../../TJdata/triples_30000.dat'
prefix='../../output/' # directory contains rowA.binary, colA.binary, valA.binary; and will save outputs
numTJ=30000 # number of triple junctions
lamb=1000 # hyperparameter for the strength of the regularization
sym='Cubic' # Cubic or Hex, it changes the gbdat file header
fn= prefix+'Cub.gbdat' # the name of output gbdat file
###########################################################################
#
# Define util functions
#
###########################################################################
def read_dat(datFile, numTJ):
"""
Input: triples.dat, written by the Fortran program Torq_gen
size=[numTJ*8,]
In each group, the data is [TJ directon, EA1, GB1, EA2, GB2, EA3, GB3]
Output: TJs, direction of the triple junctions
size = [numTJ, 3]
EAs, the EA angles of the 3 grains at a TJ
size = [numTJ, 3, 3]
norms, normal direction of the 3 GB at a TJ
size = [numTJ, 3, 3]
"""
with open(datFile) as f:
tmp = [line.split() for line in f if line.strip()]
TJs = np.zeros((numTJ, 3))
EAs = np.zeros((numTJ, 3, 3))
norms = np.zeros((numTJ, 3, 3))
for i in range(numTJ):
TJs[i,:] = np.array(tmp[i*8 + 1]).astype(float)
EAs[i,0, :] = np.array(tmp[i*8 + 2]).astype(float)
norms[i,0, :] = np.array(tmp[i*8 + 3]).astype(float)
EAs[i, 1, :] = np.array(tmp[i*8 + 4]).astype(float)
norms[i, 1, :] = np.array(tmp[i*8 + 5]).astype(float)
EAs[i, 2, :] = np.array(tmp[i*8 + 6]).astype(float)
norms[i, 2, :] = np.array(tmp[i*8 + 7]).astype(float)
return (TJs, EAs, norms)
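# --- Usage sketch (paths and sizes are the ones configured above) ---
# TJs, EAs, norms = read_dat(TJfile, numTJ)
# TJs.shape -> (numTJ, 3); EAs.shape and norms.shape -> (numTJ, 3, 3)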
def EulerZXZ2Mat(e):
"""
Converts an active Euler angle (radians, ZXZ convention) to the active rotation matrix M, such that newV = M * oldV
"""
x=e[0]
y=e[1]
z=e[2]
s1=np.sin(x)
s2=np.sin(y)
s3=np.sin(z)
c1=np.cos(x)
c2=np.cos(y)
c3= | np.cos(z) | numpy.cos |
import os
import json
import numpy as np
import seaborn as sns
import matplotlib.pyplot as pl  # needed below for pl.subplots and figure handling
import pandas as pd
from itertools import product
from pyitab.utils.matrix import copy_matrix, array_to_matrix
from pyitab.results.bids import filter_dataframe, get_results_bids
from pyitab.results.dataframe import apply_function, query_rows
from pyitab.plot.connectivity import plot_connectivity_circle_edited, plot_connectivity_lines
path = "/scratch/work/guidotr1/data/derivatives"
path = "/media/robbis/Seagate_Pt1/data/working_memory/derivatives/aalto/derivatives/"
full_df = get_results_bids(path, pipeline="triton+old", field_list=['estimator__fsel',
'ds.a.task',
'ds.a.prepro',
'ds.a.img_pattern',
'sample_slicer'])
dataframe_accuracy = apply_function(full_df,
keys=['targets', 'band', 'ds.a.task', 'k'],
attr='score_score',
fx=lambda x: np.mean(x))
dataframe_std = apply_function(full_df,
keys=['targets', 'band', 'ds.a.task', 'k'],
attr='score_score',
fx=lambda x: np.std(x))
max_k = query_rows(dataframe_accuracy,
keys=['targets', 'band', 'ds.a.task'],
attr='score_score', fx=np.max)
#########################################################################
from pyitab.utils.atlas import get_atlas_info
from sklearn.preprocessing import minmax_scale
from pyitab.plot.connectivity import plot_connectivity_lines
from pyitab.plot.nodes import barplot_nodes
from scipy.io import loadmat
order = ['theta', 'alpha', 'beta', 'gamma']  # band order, defined before its first use below
titles = ['Theta', 'Alpha', 'Beta', 'Gamma']
full_df = filter_dataframe(full_df, **{'ds.a.task':['CONN']})
f = sns.relplot(x="k", y="score_score", col="band", hue="targets", row='ds.a.task',
height=5, aspect=.75, facet_kws=dict(sharex=False), col_order=order,
kind="line", legend="full", data=full_df
)
##########################
fig, axes = pl.subplots(1, 4, sharey=True, figsize=(16,4))
for j, band in enumerate(order):
#for k, target in enumerate(np.unique(df_merged['targets'])):
k = 0
target = "0back+2back"
ax = axes[j]
df = filter_dataframe(full_df, band=[band], targets=[target])
df_avg = apply_function(df, attr='score_score', keys=['k'], fx= np.mean)
df_std = apply_function(df, attr='score_score', keys=['k'], fx= np.std, ddof=1)
avg = df_avg['score_score'].values[::5]
std = (df_std['score_score'].values / np.sqrt(25))[::5]
values = np.int_(df_avg['score_score'].values >= .575)[::5]
kk = df_avg['k'].values[::5]
values = values * (.65 + k/50.)
values[values == 0] = np.nan
ax.plot(kk, avg, c='steelblue')
ax.fill_between(kk, avg+std, avg-std, color='steelblue', alpha=0.3)
ax.plot(kk, values, 'o', c="darkgray")
if j == 0:
ax.set_ylabel('Classification accuracy')
ax.set_xlabel('k')
ax.hlines(0.5, -2, np.max(df['k'].values)+2, colors='darkgray', linestyles='dashed')
ax.set_title(band)
fig.savefig("/home/robbis/Dropbox/PhD/experiments/jaakko/Submission_2020/singleband.svg",
dpi=200)
##########################################################################
# Select the results with the best accuracy for each band in 0back-2back #
from scipy.io import savemat
df_features = get_results_bids(path,
pipeline="triton+old",
field_list=['estimator__fsel',
'estimator__clf',
'cv',
'sample_slicer',
'ds.a.task'],
result_keys=['features'])
df_features['task'] = df_features['ds.a.task'].values
selections = [
{'band': ['alpha'], 'task': ['CONN'], 'k': [216]},
{'band': ['theta'], 'task': ['CONN'], 'k': [58]},
{'band': ['gamma'], 'task': ['CONN'], 'k': [7]},
{'band': ['alpha'], 'task': ['POWER'], 'k': [72]},
{'band': ['beta'], 'task': ['POWER'], 'k': [77]},
{'band': ['theta'], 'task': ['POWER'], 'k': [44]},
{'band': ['gamma'], 'task': ['POWER'], 'k': [1]},
{'band': ['alpha'], 'task': ['POWER'], 'k': [39]},
{'band': ['beta'], 'task': ['POWER'], 'k': [43]},
{'band': ['theta'], 'task': ['POWER'], 'k': [34]},
{'band': ['gamma'], 'task': ['POWER'], 'k': [1]},
]
mat_results = []
for selection in selections:
df = filter_dataframe(df_features, **selection)
features = apply_function(df, keys=['band', 'k', 'task'],
fx=lambda x:np.vstack(x).mean(0))
mat_results.append(features)
# Average
_ = selection.pop('k')
avg_selection = selection.copy()
df = filter_dataframe(df_features, **avg_selection)
df_avg = apply_function(df, attr='score_score', keys=['k'], fx= np.mean)
values = np.int_(df_avg['score_score'].values >= .55)
indices = np.nonzero(values)[0]
if len(indices) == 0:
continue
selection['k'] = indices
df_mean = filter_dataframe(df_features, **avg_selection)
features_ = apply_function(df_mean, keys=['band', 'task'],
fx=lambda x:np.vstack(x).mean(0))
mat_results.append(features_)
mat_results = pd.concat(mat_results)
savemat("probabilities_full.mat", {'data': mat_results.to_dict("list")})
######################### Plot of connectome #################################
from pyitab.utils.atlas import get_aalmeg_info
from mne.viz import circular_layout
mask_data = loadmat("/media/robbis/Seagate_Pt1/data/working_memory/data/sub_01/meg/connectivity_matrix.mat")
mask_ = np.sum(mask_data['data'], axis=0)
mask_node = mask_.sum(0)
mask_node = mask_node != 0
info_lr = get_aalmeg_info(background='white', grouping='LR')
labels_lr, colors_lr, node_idx_lr, coords_lr, networks_lr, node_angles_lr = info_lr
labels = labels_lr[:99]
node_idx = np.lexsort((labels.T[-1], [l[-1] for l in labels.T[1]]))
node_idx = np.hstack((node_idx[:49], node_idx[49:][::-1]))
labels_ord = labels[node_idx]
coords_lr_ord = coords_lr[node_idx]
names = labels_ord.T[1]
names = np.array([n.replace("_", " ") for n in names])
node_angles = circular_layout(names.tolist(),
names.tolist(),
start_between=False,
start_pos=90,
group_boundaries=[0, 49, len(names) / 2.+1],
group_sep=3.)
node_network = labels_ord.T[3]
networks, count = np.unique(node_network, return_counts=True)
color_network = sns.color_palette("Paired", len(networks)+1)
colors_ = dict(zip(networks, color_network[1:]))
colors = [colors_[n] for n in node_network]
data = []
titles = []
for i, row in mat_results[:6:2].iterrows():
band = row['band']
condition = row['task']
k = row['k']
matrix = array_to_matrix(row['features'], copy=True, diagonal_filler=0.)
key = "band: %s | condition: %s | k: %d"%(band, condition, k)
upper = matrix[np.triu_indices(matrix.shape[0], k=1)]
upper = upper[np.nonzero(upper)]
threshold = upper.mean() + 3.*upper.std()
if threshold > 1:
threshold = 0.98
f = plot_connectivity_lines(matrix[node_idx][:,node_idx],
names,
node_colors=colors,
node_position=node_angles,
con_thresh=threshold,
kind='circle',
facecolor='white',
colormap='magma_r',
fontsize=12,
title=key)
title_fig = "connection_%s_%s_%s.png" %(band, condition, k)
f.savefig("/media/robbis/DATA/fmri/working_memory/figures/2020_%s" % (title_fig))
####################### Plot of brain regions #############################
from nilearn.plotting import plot_connectome
from pyitab.utils.atlas import get_aalmeg_info
from mne.viz import circular_layout
import matplotlib as mpl
import matplotlib.cm as cm
from sklearn.preprocessing import minmax_scale
info_lr = get_aalmeg_info(background='white', grouping='LR')
labels_lr, colors_lr, node_idx_lr, coords_lr, networks_lr, node_angles_lr = info_lr
labels = labels_lr[:99]
node_idx = np.lexsort((labels.T[-1], [l[-1] for l in labels.T[1]]))
node_idx = np.hstack((node_idx[:49], node_idx[49:][::-1]))
labels_ord = labels[node_idx]
coords_lr_ord = coords_lr[node_idx]
names = labels_ord.T[1]
names = np.array([n.replace("_", " ") for n in names])
node_angles = circular_layout(names.tolist(),
names.tolist(),
start_between=False,
start_pos=90,
group_boundaries=[0, 49, len(names) / 2.+1],
group_sep=3.)
node_network = labels_ord.T[3]
networks, count = np.unique(node_network, return_counts=True)
color_network = sns.color_palette("Paired", len(networks)+1)
colors_ = dict(zip(networks, color_network[1:]))
colors = [colors_[n] for n in node_network]
colors_brain = [colors_[n] for n in labels.T[3][:99]]
df_mpsi = filter_dataframe(mat_results, task=['CONN'])
for i, row in df_mpsi.iterrows():
matrix = array_to_matrix(row['features'], copy=True, diagonal_filler=0.)
band = row['band']
k = row['k']
if np.isnan(k):
k = 0
key = "band: %s | k: %d"%(band, k)
upper = matrix[ | np.triu_indices(matrix.shape[0], k=1) | numpy.triu_indices |
#Author: <NAME>, EMBL Heidelberg, Sachse Group (2018)
import numpy as np
import time
import argparse, os, sys
import mrcfile
import math
from FDRutil import *
#*************************************************************
#****************** Commandline input ************************
#*************************************************************
cmdl_parser = argparse.ArgumentParser(
prog=sys.argv[0], description='*** Analyse density ***',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30), add_help=True);
cmdl_parser.add_argument('-em', '--em_map', default=[], nargs='*', required=True, help='Input filename of EM-frame maps');
cmdl_parser.add_argument('-p', '--apix', metavar="apix", type=float, required=True, help='pixel Size of input map');
cmdl_parser.add_argument('-lowPassFilter', '--lowPassFilter', type=float, required=False, help='Resolution to lowPass filter');
cmdl_parser.add_argument('-addFrames', '--addFrames', type=int, required=False, help='add Frames');
cmdl_parser.add_argument('-firstFrame', '--firstFrame', type=int, required=False, help='first frame to be used, counting starts with 0');
cmdl_parser.add_argument('-lastFrame', '--lastFrame', type=int, required=False, help='last frame to be used, counting ends with numFrames-1');
#--------------------------------------------------------------------------
def kernelRegression(frameData, providedResolution):
#*****************************************
#*********** kernel regression ***********
#*****************************************
bandwidth = 3;
maps = np.copy(frameData);
sizeMap = maps.shape;
numFrames = sizeMap[3];
#if specified, filter all the frames to make falloff estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps <= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
#maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
regrMap = np.zeros(sizeMap);
#do the actual kernel regression
for frameInd in range(numFrames):
regrMapDenom = 0;
for tmpFrameInd in range(numFrames):
dist = np.abs(tmpFrameInd - frameInd);
if dist > 4:
continue;
sampleWeight = (1.0/(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-0.5*dist/(bandwidth**2));
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd] + sampleWeight*maps[ :, :, :, tmpFrameInd] ;
regrMapDenom = regrMapDenom + sampleWeight;
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd]/regrMapDenom;
#************************************
#*********** do plotting ************
#************************************
import matplotlib.pyplot as plt
fig, ax = plt.subplots(5, 5);
for row in ax:
for col in row:
xInd = np.random.randint(0, sizeMap[0]);
yInd = np.random.randint(0, sizeMap[1]);
zInd = np.random.randint(0, sizeMap[2]);
indices = np.arange(sizeMap[3]);
y1 = regrMap[ xInd, yInd, zInd, :];
y2 = maps[ xInd, yInd, zInd, :];
col.plot(indices, y1);
col.plot(indices, y2);
col.set_xticklabels([]);
col.set_yticklabels([]);
plt.savefig("Regression_falloff.pdf");
print("PDF saved ...");
plt.close();
#calculate weights
weightMap = np.copy(regrMap);
sumMap = np.sum(regrMap, 3);
sumMap = sumMap.astype(float);
sumMap[sumMap==0.0] = np.nan;
for frameInd in range(sizeMap[3]):
weightMap[:, :, :, frameInd] = weightMap[:, :, :, frameInd]/sumMap;
weightMap[np.isnan(weightMap)] = 1.0/numFrames;
return regrMap, weightMap;
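# --- Illustrative sketch (hypothetical) ---
# The per-frame weights above implement a truncated Nadaraya-Watson
# (Gaussian-kernel) smoother along the frame axis; note the kernel uses |d|
# rather than d**2 in the exponent, mirroring the loop above, and the
# normalization constant cancels when the weights are divided by their sum.
def _nw_weights_sketch(num_frames, frame, bandwidth=3, trunc=4):
    d = np.abs(np.arange(num_frames) - frame)
    w = np.exp(-0.5 * d / bandwidth ** 2) * (d <= trunc)
    return w / w.sum()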
#--------------------------------------------------------------------------
def linearizedModel(frameData, providedResolution):
#****************************************
#*********** fit linear model ***********
#****************************************
maps = np.copy(frameData);
sizeMap = maps.shape;
#print(sizeMap);
#if specified, filter all the frames to make falloff estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps<= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
#calc b1
S_xy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for i in range(sizeMap[3]):
S_xy = S_xy + (indexMap[i] - x_mean)*(maps[ :, :, :, i ] - y_mean);
S_xx = S_xx + (indexMap[i] - x_mean)**2;
#S_yy = S_yy + (maps[ :, :, :, i ] - y_mean)*(maps[ :, :, :, i ] - y_mean);
#calculate regression coefficients
b1 = np.divide(S_xy, S_xx);
b0 = y_mean - b1 * x_mean;
#calculate some residual statistics
#S_residuals = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#for frameInd in range(sizeMap[3]):
# prediction = b0 + b1*(frameInd + 1.0);
# S_residuals = S_residuals + (maps[ :, :, :, i ] - prediction)**2;
#S_residuals[S_residuals == 0] = np.nan;
#calculate t-test upon b1, H_0: b1=0, H1: b1<0
#tTestMap = b1/(np.sqrt(S_residuals/(float(sizeMap[3]-2.0))*S_xx));
#np.random.shuffle(b1);
return b0, b1;
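# --- Illustrative sketch (hypothetical) ---
# The per-voxel slope computed above is the ordinary least-squares estimate
# b1 = S_xy / S_xx along the frame axis; for a single 1-D series it reduces to:
def _ols_slope_sketch(y):
    x = np.arange(1.0, len(y) + 1.0)
    return np.sum((x - x.mean()) * (y - y.mean())) / np.sum((x - x.mean()) ** 2)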
#--------------------------------------------------------------------------
def relativeSNR(weightMap, apix):
sizeMap = weightMap.shape;
equalWeightFactor = 1.0/float(sizeMap[3]);
S_xq = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frameInd in range(sizeMap[3]):
S_xq = S_xq + weightMap[:,:,:, frameInd]*equalWeightFactor;
S_xx = S_xx + equalWeightFactor**2;
S_yy = S_yy + weightMap[:,:,:, frameInd]**2;
SNRmap = np.divide((np.sqrt(S_xx)*np.sqrt(S_yy)), S_xq);
#write the SNR map
SNRMapMRC = mrcfile.new('SNR.mrc', overwrite=True);
SNRmap = | np.float32(SNRmap) | numpy.float32 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from gammapy.irf import EffectiveAreaTable
from gammapy.modeling import Fit
from gammapy.modeling.models import (
ExpCutoffPowerLawSpectralModel,
PowerLawSpectralModel,
SkyModel,
)
from gammapy.spectrum import CountsSpectrum, SpectrumDataset, SpectrumDatasetOnOff
from gammapy.utils.random import get_random_state
from gammapy.utils.testing import requires_data, requires_dependency
@requires_dependency("iminuit")
class TestFit:
"""Test fit on counts spectra without any IRFs"""
def setup(self):
self.nbins = 30
binning = np.logspace(-1, 1, self.nbins + 1) * u.TeV
self.source_model = SkyModel(
spectral_model=PowerLawSpectralModel(
index=2, amplitude=1e5 * u.Unit("cm-2 s-1 TeV-1"), reference=0.1 * u.TeV
)
)
bkg_model = PowerLawSpectralModel(
index=3, amplitude=1e4 * u.Unit("cm-2 s-1 TeV-1"), reference=0.1 * u.TeV
)
self.alpha = 0.1
random_state = get_random_state(23)
npred = self.source_model.spectral_model.integral(
binning[:-1], binning[1:]
).value
source_counts = random_state.poisson(npred)
self.src = CountsSpectrum(
energy_lo=binning[:-1], energy_hi=binning[1:], data=source_counts
)
self.src.livetime = 1 * u.s
self.aeff = EffectiveAreaTable.from_constant(binning, "1 cm2")
npred_bkg = bkg_model.integral(binning[:-1], binning[1:]).value
bkg_counts = random_state.poisson(npred_bkg)
off_counts = random_state.poisson(npred_bkg * 1.0 / self.alpha)
self.bkg = CountsSpectrum(
energy_lo=binning[:-1], energy_hi=binning[1:], data=bkg_counts
)
self.off = CountsSpectrum(
energy_lo=binning[:-1], energy_hi=binning[1:], data=off_counts
)
def test_cash(self):
"""Simple CASH fit to the on vector"""
dataset = SpectrumDataset(
models=self.source_model,
counts=self.src,
aeff=self.aeff,
livetime=self.src.livetime,
)
npred = dataset.npred().data
assert_allclose(npred[5], 660.5171, rtol=1e-5)
stat_val = dataset.stat_sum()
assert_allclose(stat_val, -107346.5291, rtol=1e-5)
self.source_model.parameters["index"].value = 1.12
fit = Fit([dataset])
result = fit.run()
# These values are check with sherpa fits, do not change
pars = result.parameters
assert_allclose(pars["index"].value, 1.995525, rtol=1e-3)
assert_allclose(pars["amplitude"].value, 100245.9, rtol=1e-3)
def test_wstat(self):
"""WStat with on source and background spectrum"""
on_vector = self.src.copy()
on_vector.data += self.bkg.data
dataset = SpectrumDatasetOnOff(
counts=on_vector,
counts_off=self.off,
aeff=self.aeff,
livetime=self.src.livetime,
acceptance=1,
acceptance_off=1 / self.alpha,
)
dataset.models = self.source_model
self.source_model.parameters.index = 1.12
fit = Fit([dataset])
result = fit.run()
pars = self.source_model.parameters
assert_allclose(pars["index"].value, 1.997342, rtol=1e-3)
| assert_allclose(pars["amplitude"].value, 100245.187067, rtol=1e-3) | numpy.testing.assert_allclose |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import os
import theano
import numpy
from wordclasses import TheanoBigramOptimizer, NumpyBigramOptimizer
from theanolm.vocabulary import Vocabulary
from theanolm.vocabulary import compute_word_counts, BigramStatistics
class TestBigramOptimizer(unittest.TestCase):
def setUp(self):
theano.config.compute_test_value = 'warn'
script_path = os.path.dirname(os.path.realpath(__file__))
sentences_path = os.path.join(script_path, 'sentences.txt')
self.sentences_file = open(sentences_path)
self.num_classes = 2
word_counts = compute_word_counts([self.sentences_file])
self.vocabulary = Vocabulary.from_word_counts(word_counts,
self.num_classes)
self.sentences_file.seek(0)
self.statistics = BigramStatistics([self.sentences_file], self.vocabulary)
def tearDown(self):
self.sentences_file.close()
def assert_optimizers_equal(self, numpy_optimizer, theano_optimizer):
self.assertTrue(numpy.array_equal(numpy_optimizer._word_counts, theano_optimizer._word_counts.get_value()))
self.assertEqual((numpy_optimizer._ww_counts - theano_optimizer._ww_counts.get_value()).nnz, 0)
self.assertTrue(numpy.array_equal(numpy_optimizer._class_counts, theano_optimizer._class_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._cc_counts, theano_optimizer._cc_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._cw_counts, theano_optimizer._cw_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._wc_counts, theano_optimizer._wc_counts.get_value()))
def test_statistics(self):
num_words = 8
theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
self.assertEqual(theano_optimizer.vocabulary_size, num_words)
self.assertEqual(numpy_optimizer.vocabulary_size, num_words)
self.assertEqual(theano_optimizer.num_classes, self.num_classes + 3)
self.assertEqual(numpy_optimizer.num_classes, self.num_classes + 3)
self.assertEqual(len(theano_optimizer._word_to_class.get_value()), num_words)
self.assertEqual(len(numpy_optimizer._word_to_class), num_words)
sos_word_id = self.vocabulary.word_to_id['<s>']
a_word_id = self.vocabulary.word_to_id['a']
b_word_id = self.vocabulary.word_to_id['b']
c_word_id = self.vocabulary.word_to_id['c']
d_word_id = self.vocabulary.word_to_id['d']
e_word_id = self.vocabulary.word_to_id['e']
unk_word_id = self.vocabulary.word_to_id['<unk>']
eos_word_id = self.vocabulary.word_to_id['</s>']
self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
self.assertEqual(len(numpy_optimizer._word_counts), num_words)
self.assertEqual(numpy_optimizer._word_counts[sos_word_id], 11)
self.assertEqual(numpy_optimizer._word_counts[a_word_id], 13)
self.assertEqual(numpy_optimizer._word_counts[b_word_id], 8)
self.assertEqual(numpy_optimizer._word_counts[c_word_id], 8)
self.assertEqual(numpy_optimizer._word_counts[d_word_id], 11)
self.assertEqual(numpy_optimizer._word_counts[e_word_id], 15)
self.assertEqual(numpy_optimizer._word_counts[unk_word_id], 0)
self.assertEqual(numpy_optimizer._word_counts[eos_word_id], 11)
self.assertEqual(numpy_optimizer._ww_counts.shape[0], num_words)
self.assertEqual(numpy_optimizer._ww_counts.shape[1], num_words)
self.assertEqual(len(numpy_optimizer._class_counts), self.num_classes + 3)
self.assertEqual(numpy_optimizer._cc_counts.shape[0], self.num_classes + 3)
self.assertEqual(numpy_optimizer._cw_counts.shape[0], self.num_classes + 3)
self.assertEqual(numpy_optimizer._cw_counts.shape[1], num_words)
self.assertEqual(numpy_optimizer._wc_counts.shape[0], num_words)
self.assertEqual(numpy_optimizer._wc_counts.shape[1], self.num_classes + 3)
def test_move_and_back(self):
numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
orig_class_counts = numpy.copy(numpy_optimizer._class_counts)
orig_cc_counts = numpy.copy(numpy_optimizer._cc_counts)
orig_cw_counts = numpy.copy(numpy_optimizer._cw_counts)
orig_wc_counts = numpy.copy(numpy_optimizer._wc_counts)
word_id = self.vocabulary.word_to_id['d']
orig_class_id = numpy_optimizer.get_word_class(word_id)
new_class_id = 3 if orig_class_id != 3 else 4
numpy_optimizer._move(word_id, new_class_id)
theano_optimizer._move(word_id, new_class_id)
self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
self.assertEqual(numpy.count_nonzero(numpy_optimizer._class_counts != orig_class_counts), 2)
self.assertEqual(numpy.sum(numpy_optimizer._class_counts), numpy.sum(orig_class_counts))
self.assertGreater(numpy.count_nonzero(numpy_optimizer._cc_counts != orig_cc_counts), 0)
self.assertEqual(numpy.sum(numpy_optimizer._cc_counts), numpy.sum(orig_cc_counts))
        self.assertGreater(numpy.count_nonzero(numpy_optimizer._cw_counts != orig_cw_counts), 0)
import numpy as np
import scipy.optimize
import scipy.special  # provides scipy.special.erf used in _gauss_pix below
from numpy.polynomial.legendre import Legendre, legval, legfit
from lvmspec.quicklook import qlexceptions,qllogger
qlog=qllogger.QLLogger("QuickLook",20)
log=qlog.getlog()
def sigmas_from_arc(wave,flux,ivar,linelist,n=2):
"""
Gaussian fitting of listed arc lines and return corresponding sigmas in pixel units
    Args:
        wave, flux, ivar: wavelength, flux and inverse-variance arrays of the arc spectrum
        linelist: list of lines (A) for which fit is to be done
        n: fit region half width (in bin units): n=2 bins => (2*n+1)=5 bins fitting window.
"""
nwave=wave.shape
#- select the closest match to given lines
ind=[(np.abs(wave-line)).argmin() for line in linelist]
    #- fit gaussian about the peaks
meanwaves=np.zeros(len(ind))
emeanwaves=np.zeros(len(ind))
sigmas=np.zeros(len(ind))
esigmas=np.zeros(len(ind))
for jj,index in enumerate(ind):
thiswave=wave[index-n:index+n+1]-linelist[jj] #- fit window about 0
thisflux=flux[index-n:index+n+1]
thisivar=ivar[index-n:index+n+1]
spots=thisflux/thisflux.sum()
errors=1./np.sqrt(thisivar)
errors/=thisflux.sum()
popt,pcov=scipy.optimize.curve_fit(_gauss_pix,thiswave,spots)
meanwaves[jj]=popt[0]+linelist[jj]
emeanwaves[jj]=pcov[0,0]**0.5
sigmas[jj]=popt[1]
esigmas[jj]=(pcov[1,1]**0.5)
k=np.logical_and(~np.isnan(esigmas),esigmas!=np.inf)
sigmas=sigmas[k]
meanwaves=meanwaves[k]
esigmas=esigmas[k]
return meanwaves,emeanwaves,sigmas,esigmas
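# Minimal usage sketch (assumption: synthetic single-line spectrum, not taken
# from the original package):
def _demo_sigmas_from_arc():
    wave = np.linspace(5000., 5100., 501)             # 0.2 A sampling
    line = 5050.
    flux = np.exp(-0.5 * ((wave - line) / 0.6)**2)    # one line, sigma = 0.6 A
    ivar = np.full_like(flux, 1.e4)                   # uniform, well measured
    meanwaves, emeanwaves, sigmas, esigmas = sigmas_from_arc(
        wave, flux, ivar, [line], n=5)
    print(meanwaves, sigmas)   # expect ~5050. and ~0.6 (units of `wave`)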
def fit_wsigmas(means,wsigmas,ewsigmas,npoly=2,domain=None):
#- return callable legendre object
wt=1/ewsigmas**2
legfit = Legendre.fit(means, wsigmas, npoly, domain=domain,w=wt)
return legfit
def _gauss_pix(x,mean,sigma):
x=(np.asarray(x,dtype=float)-mean)/(sigma*np.sqrt(2))
dx=x[1]-x[0] #- uniform spacing
edges= np.concatenate((x-dx/2, x[-1:]+dx/2))
y=scipy.special.erf(edges)
return (y[1:]-y[:-1])/2
def process_arc(frame,linelist=None,npoly=2,nbins=2,domain=None):
"""
    frame: lvmspec.frame.Frame object, presumably resolution not evaluated.
linelist: line list to fit
npoly: polynomial order for sigma expansion
nbins: no of bins for the half of the fitting window
return: coefficients of the polynomial expansion
"""
nspec=frame.flux.shape[0]
if linelist is None:
camera=frame.meta["CAMERA"]
#- load arc lines
from lvmspec.bootcalib import load_arcline_list, load_gdarc_lines,find_arc_lines
llist=load_arcline_list(camera)
dlamb,gd_lines=load_gdarc_lines(camera,llist)
linelist=gd_lines
#linelist=[5854.1101,6404.018,7034.352,7440.9469] #- not final
log.info("No line list configured. Fitting for lines {}".format(linelist))
coeffs=np.zeros((nspec,npoly+1)) #- coeffs array
#- amend line list to only include lines in given wavelength range
wave=frame.wave
if wave[0] >= linelist[0]:
        noline_ind_lo=np.where(np.array(linelist)<=wave[0])[0]  # assumed completion: indices of lines below the wavelength range
"""
Demonstrate aXe trace polynomials.
Initial code taken from `(Brammer, Pirzkal, & Ryan 2014) <https://github.com/WFC3Grism/CodeDescription>`_, which contains a detailed
explanation how the grism configuration parameters and coefficients are defined and evaluated.
"""
import os
import numpy as np
from . import GRIZLI_PATH
class aXeConf():
def __init__(self, conf_file='WFC3.IR.G141.V2.5.conf'):
"""Read an aXe-compatible configuration file
Parameters
----------
conf_file: str
Filename of the configuration file to read
"""
if conf_file is not None:
self.conf = self.read_conf_file(conf_file)
self.conf_file = conf_file
self.count_beam_orders()
# Global XOFF/YOFF offsets
if 'XOFF' in self.conf.keys():
            self.xoff = float(self.conf['XOFF'])  # builtin float: np.float was removed in NumPy >= 1.24
else:
self.xoff = 0.
if 'YOFF' in self.conf.keys():
            self.yoff = float(self.conf['YOFF'])
else:
self.yoff = 0.
def read_conf_file(self, conf_file='WFC3.IR.G141.V2.5.conf'):
"""Read an aXe config file, convert floats and arrays
Parameters
----------
conf_file: str
Filename of the configuration file to read.
Parameters are stored in an OrderedDict in `self.conf`.
"""
from collections import OrderedDict
conf = OrderedDict()
fp = open(conf_file)
lines = fp.readlines()
fp.close()
for line in lines:
# empty / commented lines
if (line.startswith('#')) | (line.strip() == '') | ('"' in line):
continue
# split the line, taking out ; and # comments
spl = line.split(';')[0].split('#')[0].split()
param = spl[0]
if len(spl) > 2:
value = np.cast[float](spl[1:])
else:
try:
value = float(spl[1])
except:
value = spl[1]
conf[param] = value
return conf
def count_beam_orders(self):
"""Get the maximum polynomial order in DYDX or DLDP for each beam
"""
self.orders = {}
for beam in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']:
order = 0
while 'DYDX_{0:s}_{1:d}'.format(beam, order) in self.conf.keys():
order += 1
while 'DLDP_{0:s}_{1:d}'.format(beam, order) in self.conf.keys():
order += 1
self.orders[beam] = order-1
def get_beams(self):
"""Get beam parameters and read sensitivity curves
"""
import os
from collections import OrderedDict
from astropy.table import Table, Column
self.dxlam = OrderedDict()
self.nx = OrderedDict()
self.sens = OrderedDict()
self.beams = []
for beam in self.orders:
if self.orders[beam] > 0:
self.beams.append(beam)
self.dxlam[beam] = np.arange(self.conf['BEAM{0}'.format(beam)].min(), self.conf['BEAM{0}'.format(beam)].max(), dtype=int)
self.nx[beam] = int(self.dxlam[beam].max()-self.dxlam[beam].min())+1
self.sens[beam] = Table.read('{0}/{1}'.format(os.path.dirname(self.conf_file), self.conf['SENSITIVITY_{0}'.format(beam)]))
#self.sens[beam].wave = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
#self.sens[beam].sens = np.cast[np.double](self.sens[beam]['SENSITIVITY'])
# Need doubles for interpolating functions
for col in self.sens[beam].colnames:
data = np.cast[np.double](self.sens[beam][col])
self.sens[beam].remove_column(col)
self.sens[beam].add_column(Column(data=data, name=col))
# Scale BEAM F
if (beam == 'F') & ('G141' in self.conf_file):
self.sens[beam]['SENSITIVITY'] *= 0.35
if (beam == 'B') & ('G141' in self.conf_file):
if self.conf['SENSITIVITY_B'] == 'WFC3.IR.G141.0th.sens.1.fits':
self.sens[beam]['SENSITIVITY'] *= 2
# wave = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
# sens = np.cast[np.double](self.sens[beam]['SENSITIVITY']
# self.sens[beam]['WAVELENGTH'] = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
# self.sens[beam]['SENSITIVITY'] = )
self.beams.sort()
def field_dependent(self, xi, yi, coeffs):
"""aXe field-dependent coefficients
See the `aXe manual <http://axe.stsci.edu/axe/manual/html/node7.html#SECTION00721200000000000000>`_ for a description of how the field-dependent coefficients are specified.
Parameters
----------
xi, yi : float or array-like
Coordinate to evaluate the field dependent coefficients, where
`xi = x-REFX` and `yi = y-REFY`.
coeffs : array-like
Field-dependency coefficients
Returns
-------
a : float or array-like
Evaluated field-dependent coefficients
"""
# number of coefficients for a given polynomial order
# 1:1, 2:3, 3:6, 4:10, order:order*(order+1)/2
if isinstance(coeffs, float):
order = 1
else:
order = int(-1+np.sqrt(1+8*len(coeffs))) // 2
# Build polynomial terms array
# $a = a_0+a_1x_i+a_2y_i+a_3x_i^2+a_4x_iy_i+a_5yi^2+$ ...
xy = []
for p in range(order):
for px in range(p+1):
# print 'x**%d y**%d' %(p-px, px)
xy.append(xi**(p-px)*yi**(px))
# Evaluate the polynomial, allowing for N-dimensional inputs
a = np.sum((np.array(xy).T*coeffs).T, axis=0)
return a
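    # Worked example (illustrative numbers, not from any real aXe file): three
    # coefficients imply order 2, i.e. a = a0 + a1*xi + a2*yi, so for
    # coeffs = [1.0, 0.01, -0.02] evaluated at (xi, yi) = (100, 50):
    #     a = 1.0 + 0.01*100 - 0.02*50 = 1.0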
def evaluate_dp(self, dx, dydx):
"""Evalate arc length along the trace given trace polynomial coefficients
Parameters
----------
dx : array-like
x pixel to evaluate
dydx : array-like
Coefficients of the trace polynomial
Returns
-------
dp : array-like
Arc length along the trace at position `dx`.
For `dydx` polynomial orders 0, 1 or 2, integrate analytically.
Higher orders must be integrated numerically.
**Constant:**
.. math:: dp = dx
**Linear:**
        .. math:: dp = \sqrt{1+\mathrm{DYDX}[1]^2}\cdot dx
**Quadratic:**
.. math:: u = \mathrm{DYDX}[1] + 2\ \mathrm{DYDX}[2]\cdot dx
.. math:: dp = (u \sqrt{1+u^2} + \mathrm{arcsinh}\ u) / (4\cdot \mathrm{DYDX}[2])
"""
# dp is the arc length along the trace
# $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
poly_order = len(dydx)-1
if (poly_order == 2):
if np.abs(np.unique(dydx[2])).max() == 0:
poly_order = 1
if poly_order == 0: # dy=0
dp = dx
elif poly_order == 1: # constant dy/dx
dp = np.sqrt(1+dydx[1]**2)*(dx)
elif poly_order == 2: # quadratic trace
u0 = dydx[1]+2*dydx[2]*(0)
dp0 = (u0*np.sqrt(1+u0**2)+np.arcsinh(u0))/(4*dydx[2])
u = dydx[1]+2*dydx[2]*(dx)
dp = (u*np.sqrt(1+u**2)+np.arcsinh(u))/(4*dydx[2])-dp0
else:
# high order shape, numerical integration along trace
# (this can be slow)
xmin = np.minimum((dx).min(), 0)
xmax = np.maximum((dx).max(), 0)
xfull = np.arange(xmin, xmax)
dyfull = 0
for i in range(1, poly_order):
dyfull += i*dydx[i]*(xfull-0.5)**(i-1)
# Integrate from 0 to dx / -dx
dpfull = xfull*0.
lt0 = xfull < 0
if lt0.sum() > 1:
dpfull[lt0] = np.cumsum(np.sqrt(1+dyfull[lt0][::-1]**2))[::-1]
dpfull[lt0] *= -1
#
gt0 = xfull > 0
if gt0.sum() > 0:
dpfull[gt0] = np.cumsum(np.sqrt(1+dyfull[gt0]**2))
dp = np.interp(dx, xfull, dpfull)
if dp[-1] == dp[-2]:
dp[-1] = dp[-2]+np.diff(dp)[-2]
return dp
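    # Quick sanity check (illustrative, not part of aXe): for a linear trace
    # dydx = [0, 1], the arc length over dx pixels is sqrt(2)*dx, so
    # self.evaluate_dp(np.array([10.]), [0, 1]) returns ~[14.142].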
def get_beam_trace(self, x=507, y=507, dx=0., beam='A', fwcpos=None):
"""Get an aXe beam trace for an input reference pixel and list of output x pixels `dx`
Parameters
----------
x, y : float or array-like
Evaluate trace definition at detector coordinates `x` and `y`.
dx : float or array-like
Offset in x pixels from `(x,y)` where to compute trace offset and
effective wavelength
beam : str
Beam name (i.e., spectral order) to compute. By aXe convention,
`beam='A'` is the first order, 'B' is the zeroth order and
additional beams are the higher positive and negative orders.
fwcpos : None or float
For NIRISS, specify the filter wheel position to compute the
trace rotation
Returns
-------
dy : float or array-like
Center of the trace in y pixels offset from `(x,y)` evaluated at
`dx`.
lam : float or array-like
Effective wavelength along the trace evaluated at `dx`.
"""
NORDER = self.orders[beam]+1
xi, yi = x-self.xoff, y-self.yoff
xoff_beam = self.field_dependent(xi, yi, self.conf['XOFF_{0}'.format(beam)])
yoff_beam = self.field_dependent(xi, yi, self.conf['YOFF_{0}'.format(beam)])
# y offset of trace (DYDX)
dydx = np.zeros(NORDER) # 0 #+1.e-80
dydx = [0]*NORDER
for i in range(NORDER):
if 'DYDX_{0:s}_{1:d}'.format(beam, i) in self.conf.keys():
coeffs = self.conf['DYDX_{0:s}_{1:d}'.format(beam, i)]
dydx[i] = self.field_dependent(xi, yi, coeffs)
# $dy = dydx_0+dydx_1 dx+dydx_2 dx^2+$ ...
dy = yoff_beam
for i in range(NORDER):
dy += dydx[i]*(dx-xoff_beam)**i
# wavelength solution
dldp = np.zeros(NORDER)
dldp = [0]*NORDER
for i in range(NORDER):
if 'DLDP_{0:s}_{1:d}'.format(beam, i) in self.conf.keys():
coeffs = self.conf['DLDP_{0:s}_{1:d}'.format(beam, i)]
dldp[i] = self.field_dependent(xi, yi, coeffs)
self.eval_input = {'x': x, 'y': y, 'beam': beam, 'dx': dx,
'fwcpos': fwcpos}
self.eval_output = {'xi': xi, 'yi': yi, 'dldp': dldp, 'dydx': dydx,
'xoff_beam': xoff_beam, 'yoff_beam': yoff_beam,
'dy': dy}
dp = self.evaluate_dp(dx-xoff_beam, dydx)
# ## dp is the arc length along the trace
# ## $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
# if self.conf['DYDX_ORDER_%s' %(beam)] == 0: ## dy=0
# dp = dx-xoff_beam
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 1: ## constant dy/dx
# dp = np.sqrt(1+dydx[1]**2)*(dx-xoff_beam)
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 2: ## quadratic trace
# u0 = dydx[1]+2*dydx[2]*(0)
# dp0 = (u0*np.sqrt(1+u0**2)+np.arcsinh(u0))/(4*dydx[2])
# u = dydx[1]+2*dydx[2]*(dx-xoff_beam)
# dp = (u*np.sqrt(1+u**2)+np.arcsinh(u))/(4*dydx[2])-dp0
# else:
# ## high order shape, numerical integration along trace
# ## (this can be slow)
# xmin = np.minimum((dx-xoff_beam).min(), 0)
# xmax = np.maximum((dx-xoff_beam).max(), 0)
# xfull = np.arange(xmin, xmax)
# dyfull = 0
# for i in range(1, NORDER):
# dyfull += i*dydx[i]*(xfull-0.5)**(i-1)
#
# ## Integrate from 0 to dx / -dx
# dpfull = xfull*0.
# lt0 = xfull <= 0
# if lt0.sum() > 1:
# dpfull[lt0] = np.cumsum(np.sqrt(1+dyfull[lt0][::-1]**2))[::-1]
# dpfull[lt0] *= -1
# #
# gt0 = xfull >= 0
# if gt0.sum() > 0:
# dpfull[gt0] = np.cumsum(np.sqrt(1+dyfull[gt0]**2))
#
# dp = np.interp(dx-xoff_beam, xfull, dpfull)
# Evaluate dldp
lam = dp*0.
for i in range(NORDER):
lam += dldp[i]*dp**i
# NIRISS rotation?
if fwcpos is not None:
if 'FWCPOS_REF' not in self.conf.keys():
print('Parameter fwcpos={0} supplied but no FWCPOS_REF in {1:s}'.format(fwcpos, self.conf_file))
return dy, lam
order = self.conf['DYDX_ORDER_{0}'.format(beam)]
if order != 2:
print('ORDER={0:d} not supported for NIRISS rotation'.format(order))
return dy, lam
theta = (fwcpos - self.conf['FWCPOS_REF'])/180*np.pi*1
theta *= -1 # DMS rotation
# print('DMS')
if theta == 0:
return dy, lam
# For the convention of swapping/inverting axes for GR150C
# if 'GR150C' in self.conf_file:
# theta = -theta
# If theta is small, use a small angle approximation.
# Otherwise, 1./tan(theta) blows up and results in numerical
# noise.
xp = (dx-xoff_beam)/np.cos(theta)
if (1-np.cos(theta) < 5.e-8):
#print('Approximate!', xoff_beam, np.tan(theta))
dy = dy + (dx-xoff_beam)*np.tan(theta)
delta = 0.
# print('Approx')
else:
# Full transformed trace coordinates
c = dydx
#print('Not approx')
beta = c[1]+2*c[2]*xp-1/np.tan(theta)
chi = c[0]+c[1]*xp+c[2]*xp**2
if theta < 0:
psi = (-beta+np.sqrt(beta**2-4*c[2]*chi))
psi *= 1./2/c[2]/np.tan(theta)
delta = psi*np.tan(theta)
dy = dx*np.tan(theta) + psi/np.cos(theta)
else:
psi = (-beta-np.sqrt(beta**2-4*c[2]*chi))
psi *= 1./2/c[2]/np.tan(theta)
delta = psi*np.tan(theta)
dy = dx*np.tan(theta) + psi/np.cos(theta)
            # Evaluate wavelength at the x-prime position along the trace
dp = self.evaluate_dp(xp+delta, dydx)
lam = dp*0.
for i in range(NORDER):
lam += dldp[i]*dp**i
return dy, lam
def show_beams(self, xy=None, beams=['E', 'D', 'C', 'B', 'A']):
"""
Make a demo plot of the beams of a given configuration file
"""
import matplotlib.pyplot as plt
x0, x1 = 507, 507
        dx = np.arange(-800, 1200)
import numpy as np
import logging
import six
import loopy as lp
import cantera as ct
from nose.plugins.attrib import attr
from unittest.case import SkipTest
from parameterized import parameterized
try:
from scipy.sparse import csr_matrix, csc_matrix
except ImportError:
csr_matrix = None
csc_matrix = None
from pyjac.core.rate_subs import (
get_concentrations,
get_rop, get_rop_net, get_spec_rates, get_molar_rates, get_thd_body_concs,
get_rxn_pres_mod, get_reduced_pressure_kernel, get_lind_kernel,
get_sri_kernel, get_troe_kernel, get_simple_arrhenius_rates,
polyfit_kernel_gen, get_plog_arrhenius_rates, get_cheb_arrhenius_rates,
get_rev_rates, get_temperature_rate, get_extra_var_rates)
from pyjac.loopy_utils.loopy_utils import (
loopy_options, kernel_call, set_adept_editor, populate, get_target)
from pyjac.core.enum_types import RateSpecialization, FiniteDifferenceMode
from pyjac.core.create_jacobian import (
dRopi_dnj, dci_thd_dnj, dci_lind_dnj, dci_sri_dnj, dci_troe_dnj,
total_specific_energy, dTdot_dnj, dEdot_dnj, thermo_temperature_derivative,
dRopidT, dRopi_plog_dT, dRopi_cheb_dT, dTdotdT, dci_thd_dT, dci_lind_dT,
dci_troe_dT, dci_sri_dT, dEdotdT, dTdotdE, dEdotdE, dRopidE, dRopi_plog_dE,
dRopi_cheb_dE, dci_thd_dE, dci_lind_dE, dci_troe_dE, dci_sri_dE,
determine_jac_inds, reset_arrays, get_jacobian_kernel,
finite_difference_jacobian)
from pyjac.core import array_creator as arc
from pyjac.core.enum_types import reaction_type, falloff_form
from pyjac.kernel_utils import kernel_gen as k_gen
from pyjac.tests import get_test_langs, TestClass
from pyjac.tests.test_utils import (
kernel_runner, get_comparable, _generic_tester,
_full_kernel_test, with_check_inds, inNd, skipif, xfail)
from pyjac.core.enum_types import KernelType
from pyjac import utils
class editor(object):
def __init__(self, independent, dependent,
problem_size, order, do_not_set=[],
skip_on_missing=None):
def __replace_problem_size(shape):
new_shape = []
for x in shape:
if x != arc.problem_size.name:
new_shape.append(x)
else:
new_shape.append(problem_size)
return tuple(new_shape)
assert len(independent.shape) == 2
self.independent = independent.copy(shape=__replace_problem_size(
independent.shape))
indep_size = independent.shape[1]
assert len(dependent.shape) == 2
self.dependent = dependent.copy(shape=__replace_problem_size(
dependent.shape))
dep_size = dependent.shape[1]
self.problem_size = problem_size
# create the jacobian
self.output = arc.creator('jac', np.float64,
(problem_size, dep_size, indep_size),
order=order)
self.output = self.output(*['i', 'j', 'k'])[0]
self.do_not_set = utils.listify(do_not_set)
self.skip_on_missing = skip_on_missing
def set_single_kernel(self, single_kernel):
"""
It's far easier to use two generated kernels, one that uses the full
problem size (for calling via loopy), and another that uses a problem
size of 1, to work with Adept indexing in the AD kernel
"""
self.single_kernel = single_kernel
def set_skip_on_missing(self, func):
"""
If set, skip if the :class:`kernel_info` returned by this function
is None
"""
self.skip_on_missing = func
def __call__(self, knl):
return set_adept_editor(knl, self.single_kernel, self.problem_size,
self.independent, self.dependent, self.output,
self.do_not_set)
# various convenience wrappers
def _get_fall_call_wrapper():
def fall_wrapper(loopy_opts, namestore, test_size):
return get_simple_arrhenius_rates(loopy_opts, namestore,
test_size, falloff=True)
return fall_wrapper
def _get_plog_call_wrapper(rate_info):
def plog_wrapper(loopy_opts, namestore, test_size):
if rate_info['plog']['num']:
return get_plog_arrhenius_rates(loopy_opts, namestore,
rate_info['plog']['max_P'],
test_size)
return plog_wrapper
def _get_cheb_call_wrapper(rate_info):
def cheb_wrapper(loopy_opts, namestore, test_size):
if rate_info['cheb']['num']:
            # completion below assumed by analogy with the plog wrapper above
            return get_cheb_arrhenius_rates(loopy_opts, namestore,
                                            np.max(rate_info['cheb']['num_P']),
                                            np.max(rate_info['cheb']['num_T']),
                                            test_size)
    return cheb_wrapper
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
def density(tensor):
"""
Computes the ratio of nonzeros to total elements in a tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:return: Ratio of nonzeros to total elements
:rtype: `float`
"""
t = tensor.view(-1)
return float(t.nonzero().numel()) / float(t.numel())
def sparsity(tensor):
"""
Computes the ratio of zeros to total elements in a tensor.
:param tensor: PyTorch tensor
:type tensor: torch.Tensor
:return: Ratio of zeros to total elements
:rtype: `float`
"""
return 1. - density(tensor)
def threshold(tensor, density):
"""
Computes a magnitude-based threshold for given tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param density: Desired ratio of nonzeros to total elements
:type density: `float`
:return: Magnitude threshold
:rtype: `float`
"""
tf = tensor.abs().view(-1)
numel = int(density * tf.numel())
if numel == 0:
raise RuntimeError('Provided density value causes model to be zero.')
topk, _ = torch.topk(tf.abs(), numel, sorted=True)
return topk.data[-1]
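# Usage sketch (assumption: illustrative random tensor, not from the Condensa
# docs): keep the largest 25% of entries by magnitude.
def _demo_threshold():
    w = torch.randn(8, 8)
    t = threshold(w, density=0.25)
    mask = simple_mask(w.view(-1), t)         # simple_mask is defined further below
    print(float(mask.sum()) / mask.numel())   # ~0.25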
def aggregate(tensor, blocksize, criteria):
"""
Aggregates tensor dimensions according to criteria.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param blocksize: Size of blocks to aggregate
:type blocksize: `Tuple(int)`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Aggregated tensor
:rtype: `torch.Tensor`
"""
if tensor.dim() != len(blocksize):
raise RuntimeError('Tensor and block dimensions do not match')
ndim = tensor.dim()
blocksize_flat = np.prod(np.array(blocksize))
shape = np.array(tensor.shape)
repeats = (shape / blocksize).astype(int)
divcheck = (shape % blocksize).astype(int)
if not np.all(divcheck == 0):
        raise TypeError('Tensor dimensions must be divisible by the block size')
tmpshape = np.column_stack([repeats, blocksize]).ravel()
order = np.arange(len(tmpshape))
order = np.concatenate([order[::2], order[1::2]])
blocks = tensor.abs().reshape(tuple(tmpshape))
blocks = blocks.permute(tuple(order)).reshape(-1, *blocksize)
agg = criteria(blocks.reshape(-1, blocksize_flat), dim=1, keepdim=True)
return agg
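# Usage sketch (assumptions: a toy 4x4 tensor and torch.amax as the criteria;
# torch.amax needs PyTorch >= 1.7, and condensa.functional provides comparable
# reducers with the same (input, dim, keepdim) signature):
def _demo_aggregate():
    w = torch.arange(16.).reshape(4, 4)
    agg = aggregate(w, blocksize=(2, 2), criteria=torch.amax)
    print(agg.view(-1))   # per-block max magnitudes: [5., 7., 13., 15.]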
def aggregate_neurons(tensor, criteria):
"""
Aggregates neurons (rows) in given weight matrix.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Neuron-aggregated tensor
:rtype: `torch.Tensor`
"""
return aggregate(tensor, (1, tensor.shape[1]), criteria)
def aggregate_filters(tensor, criteria):
"""
Aggregates 3D filters in given weight tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Filter-aggregated tensor
:rtype: `torch.Tensor`
"""
return aggregate(tensor, (1, *tensor.shape[1:]), criteria)
def simple_mask(tensor, threshold, align=None):
"""
Computes a simple binary mask for given magnitude threshold.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param threshold: magnitude threshold for pruning
:type threshold: `float`
:return: Mask
:rtype: `torch.Tensor`
"""
assert tensor.dim() == 1
if align is None:
return torch.ge(tensor.abs(), threshold)
else:
size = tensor.size(0)
if size < align:
raise RuntimeError('Tensor too small for given alignment')
t = tensor.abs()
nnz = torch.ge(t, threshold).nonzero().size(0)
nnz = int(nnz / align) * align
_, indices = torch.topk(t, nnz)
ones = torch.ones(nnz,
dtype=tensor.dtype,
layout=tensor.layout,
device=tensor.device)
mask = torch.zeros_like(tensor).scatter_(0, indices, ones)
return mask
def block_mask(tensor, threshold, blocksize, criteria, align=None):
"""
Computes an n-D binary mask for given magnitude threshold.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param threshold: magnitude threshold for pruning
:type threshold: `float`
:param blocksize: desired block size (Tuple)
:type blocksize: `Tuple`
:param criteria: aggregation function for thresholding (default: max)
:type criteria: `condensa.functional`
:return: Mask
:rtype: `torch.Tensor`
"""
# Original implementation at: https://stackoverflow.com/questions/42297115
# /numpy-split-cube-into-cubes/42298440#42298440
if tensor.dim() != len(blocksize):
raise RuntimeError('Tensor and block dimensions do not match')
ndim = tensor.dim()
blocksize_flat = np.prod(np.array(blocksize))
shape = np.array(tensor.shape)
repeats = (shape / blocksize).astype(int)
divcheck = (shape % blocksize).astype(int)
if not np.all(divcheck == 0):
        raise TypeError('Tensor dimensions must be divisible by the block size')
    tmpshape = np.column_stack([repeats, blocksize]).ravel()
#!/usr/bin/env python
# coding: utf-8
############################################
##Utility functions used in processing seismic data.
############################################
#import needed packages.
import sys,time,scipy,obspy,pyasdf
import datetime,os, glob
import numpy as np
import pandas as pd
from numba import jit
import matplotlib.pyplot as plt
from collections import OrderedDict
from scipy.signal import tukey,hilbert
from obspy.clients.fdsn import Client
from obspy.core import Stream, Trace, read
from obspy.core.util.base import _get_function_from_entry_point
from obspy.signal.util import _npts2nfft
from obspy.signal.filter import bandpass
from scipy.fftpack import fft,ifft,next_fast_len
from obspy.core.inventory import Inventory, Network, Station, Channel, Site
from obspy.core.inventory import Inventory, Network, Station, Channel, Site
from obspy.geodetics.base import locations2degrees
from obspy.taup import TauPyModel
from shapely.geometry import Polygon, Point
import netCDF4 as nc
def rms(d):
return np.sqrt(np.mean(d**2))
def get_snr(d,t,dist,vmin,vmax,extend=0,offset=20,axis=1,getwindow=False,db=False):
"""
Get SNRs of the data with given distance, vmin, and vmax. The signal window will be
computed using vmin and vmax. The noise window will be the same length as the signal
window shifted toward the end with the given offset.
==========
d,t,dist,vmin,vmax: REQUIRED. data, time vector, distance, minimum velocity, maximum velocity.
    extend: extend the window length from the computed window based on vmin and vmax. Default is 0.
offset: offset between noise and signal windows, in seconds. default is 20.
axis: axis for the calculation. default 1.
db: Decibel or not. Default is False.
getwindow: return the indices of the signal and noise windows. only the start and end indices.
Default False.
=======RETURNS======
snr: [negative, positive]
[sig_idx_p,noise_idx_p],[sig_idx_n,noise_idx_n]: only return these windows when getwindow is True.
"""
d=np.array(d)
#get window index:
tmin=dist/vmax
tmax=extend + dist/vmin
dt=np.abs(t[1]-t[0])
shift=int(offset/dt)
halfn=int(len(t)/2) + 1
sig_idx_p=[int(tmin/dt)+halfn,int(tmax/dt)+halfn]
winlen=sig_idx_p[1]-sig_idx_p[0]+1
noise_idx_p= [sig_idx_p[0]+shift+winlen,sig_idx_p[1]+shift+winlen]
if noise_idx_p[1] > len(t) - 1:
raise ValueError("Noise window end [%d]is larger than the data length [%d]. Please adjust it."%(noise_idx_p[1],len(t)-1))
sig_idx_n=[len(t) - sig_idx_p[1], len(t) - sig_idx_p[0]]
noise_idx_n=[len(t) - noise_idx_p[1], len(t) - noise_idx_p[0]]
if d.ndim==1:
#axis is not used in this case
snr_n=rms(np.abs(d[sig_idx_n[0]:sig_idx_n[1]+1]))/rms(np.abs(d[noise_idx_n[0]:noise_idx_n[1]+1]))
snr_p=rms(np.abs(d[sig_idx_p[0]:sig_idx_p[1]+1]))/rms(np.abs(d[noise_idx_p[0]:noise_idx_p[1]+1]))
snr=[snr_n**2,snr_p**2]
elif d.ndim==2:
#
if axis==1:dim=0
else:dim=1
snr=np.ndarray((d.shape[dim],2))
for i in range(d.shape[dim]):
snr_n=rms(np.abs(d[i,sig_idx_n[0]:sig_idx_n[1]+1]))/rms(np.abs(d[i,noise_idx_n[0]:noise_idx_n[1]+1]))
snr_p=rms(np.abs(d[i,sig_idx_p[0]:sig_idx_p[1]+1]))/rms(np.abs(d[i,noise_idx_p[0]:noise_idx_p[1]+1]))
snr[i,:]=[snr_n**2,snr_p**2]
#
else:
raise ValueError("Only handles ndim <=2.")
snr=[np.nan,np.nan]
if db:
snr=10*np.log10(snr)
if getwindow:
return snr,[sig_idx_p,noise_idx_p],[sig_idx_n,noise_idx_n]
else:
return snr
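def _demo_get_snr():
    """
    Minimal usage sketch (synthetic data, not from the original package): a
    noisy trace with a pulse placed inside the positive-lag signal window.
    """
    t = np.linspace(-100., 100., 2001)     # 0.1 s sampling
    d = 0.01 * np.random.randn(len(t))     # background noise
    dist, vmin, vmax = 100., 2., 5.        # signal window: 20--50 s
    d[(t > 25) & (t < 35)] += 1.0          # pulse inside that window
    print(get_snr(d, t, dist, vmin, vmax, offset=10))   # [negative, positive]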
##
def subsetindex(full,subset):
"""
Get the indices of the subset of a list.
"""
if isinstance(subset,str):subset=[subset]
idx=[]
for s in subset:
idx += [i for i, x in enumerate(full) if x == s]
return idx
#
def get_filelist(dir,extension=None,pattern=None,sort=True):
"""
Get list of files with absolute path, by specifying the format extension.
===========PARAMETERS=============
dir: directory containing the files.
extension: file extension (the ending format tag), for example "h5" for asdf file.
pattern: pattern to use in searching. Wildcards are NOT considered here.
sort: (optional) to sort the list, default is True.
============RETURN=============
flist: the list of file names with paths.
"""
if extension is None:
flist=[os.path.join(dir,f) for f in os.listdir(dir)]
else:
flist=[os.path.join(dir,f) for f in os.listdir(dir) if f[-len(extension):].lower()==extension]
if pattern is not None:
flist2=[]
for f in flist:
if f.find(pattern)>=0: flist2.append(f)
flist=flist2
if sort:
return sorted(flist)
else:
return flist
def slice_list(flist,step,preserve_end=True):
"""
Slice a lit of values, with given step. Different from utils.sliding_window(), this function
provides unique segments with NO overlaps. It works with generic Python list object.
========PARAMETERS============
flist: list to be sliced.
step: step or length of each segment.
preserve_end: if True the end element will be included, the last segment may have different length.
Default is True.
"""
step=int(step)
outlist=[]
if len(flist)<step:
outlist.append(flist)
else:
idxall=np.arange(0,len(flist),step)
if idxall[-1]<len(flist)-1 and preserve_end:
idxall=np.append(idxall,len(flist)-1) #make sure all files are considered.
if len(idxall)==1:
outlist=[flist[:idxall[0]]]
else:
for i in range(len(idxall)-1):
sublist=[flist[j] for j in np.arange(idxall[i],idxall[i+1])]
outlist.append(sublist)
#
return outlist
#
def generate_points_in_polygon(outline,spacing):
"""
Generate points in polygon, defined as a shapely.polygon object.
outline: list of (x,y) points that define the polygon.
spacing: spacing of the points to be generated.
"""
poly=Polygon(outline)
minx, miny, maxx, maxy = poly.bounds
x0=np.arange(minx-spacing,maxx+spacing,spacing)
y0=np.arange(miny-spacing,maxy+spacing,spacing)
pointsx=[]
pointsy=[]
for i in range(len(x0)):
for j in range(len(y0)):
p = Point(x0[i], y0[j])
if poly.contains(p):
pointsx.append(x0[i])
pointsy.append(y0[j])
return pointsx,pointsy
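# Usage sketch (illustrative unit-square outline, not from the original docs):
def _demo_generate_points_in_polygon():
    outline = [(0, 0), (1, 0), (1, 1), (0, 1)]
    px, py = generate_points_in_polygon(outline, spacing=0.25)
    print(len(px))   # 9: the strictly interior points of a 0.25 grid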
#
#
def points_in_polygon(outline,qx,qy):
"""
Get points that are within the given polygon. Returns the index list.
poly: list of (x,y) points that define the polygon
qx,qy: list of the x and y coordinats of the points
that are to be checked.
===RETURNS===
ix,iy: indices of x and y for points within the polygon.
"""
poly=Polygon(outline)
ix=[]
iy=[]
for i in range(len(qx)):
for j in range(len(qy)):
p = Point(qx[i], qy[j])
if poly.contains(p):
ix.append(i)
iy.append(j)
return ix,iy
#
#
def read_gmtlines(file,comment="#",segment=">"):
"""
Read GMT stype lines from text file. By default, the comment lines
start with "#" and segments are separated by lines starting with ">".
They can be specified if different than the defaults.
file - File name.
=====RETURNS====
dall, tags - Data (list of all 2-d arrays for all line segments) and tags.
"""
tags=[]
data=[]
dall=[]
with open(file,'r') as fo:
for line in fo:
idn=line.find("\n")
if idn>0: line=line[:idn] #trim ending LINE RETURN SYMBOL
if line[0] == comment: #skip comment line
pass
elif line[0]== segment:
tag=str(line[1:])
if tag.find("-L"):tag=tag[tag.find("-L")+2:]
tags.append(tag)
if len(data)>0:dall.append(np.array(data))
data=[]
else:
if line.find("\t") >0:
cols = line.split("\t")
else:
cols = line.split(" ")
data.append([float(i) for i in cols if len(i)>0 ])
dall.append(np.array(data))
return dall,tags
#
#
def listvar_ncmodel(dfile,metadata=False):
"""
Read 3D seismic model from netCDF file that follows IRIS EMC format.
dfile - Data file name.
var - variable name.
===RETURNS===
lon,lat,dep,val - coordinats and the 3-D model (val) for the specified variable.
"""
ds=nc.Dataset(dfile)
var=ds.variables
if metadata:
md=ds.__dict__
return var,md
else:
return var
def read_ncmodel3d(dfile,var,metadata=False):
"""
Read 3D seismic model from netCDF file that follows IRIS EMC format.
dfile - Data file name.
var - variable name.
metadata - get metadata or not. Default False.
===RETURNS===
lon,lat,dep,val - coordinats and the 3-D model (val) for the specified variable.
"""
ds=nc.Dataset(dfile)
lon=np.array(ds['longitude'][:])
lat=np.array(ds['latitude'][:])
dep=np.array(ds['depth'][:])
val=np.array(ds[var][:])
if metadata:
md=ds.__dict__
return dep,lat,lon,val,md
else:
return dep,lat,lon,val
def read_ncmodel2d(dfile,var,metadata=False):
"""
Read 2D seismic surface models from netCDF file that follows IRIS EMC format.
dfile - Data file name.
var - variable name.
metadata - get metadata or not. Default False.
===RETURNS===
lat,lon,val - coordinats and the 3-D model (val) for the specified variable.
md - Only returns this when metadata is True.
"""
ds=nc.Dataset(dfile)
lon=np.array(ds['longitude'][:])
lat=np.array(ds['latitude'][:])
dep=np.array(ds['depth'][:])
val=np.array(ds[var][:])
if metadata:
md=ds.__dict__
return lat,lon,val,md
else:
return lat,lon,val
#
#
def ncmodel_in_polygon(dfile,var,outlines,vmax=9000,allstats=False,surface=False,
lon_correction=0.0):
"""
Extract seismic model within polygons from 3d or 2d model in netCDF format.
===PARAMETERS===
dfile - Data file name.
var - variable name.
vmax - maximum value, above which will be set to numpy nan.
    allstats - If True, returns all statistics (mean, median, min, max, std) of
the model within the polygon. If False, only returns the mean 1d model.
Default False.
lon_correction - add correction to model longitude. Default 0.0.
===RETURNS===
dep - Depth grid. Returns only when surface is False.
val_mean - Average model value (1d profile in case of 3d ncmodel). Returns in all cases.
    val_median,val_min,val_max,val_std - Only returns these when allstats is True.
"""
if surface: #read in 2d surfaces
lat,lon,val=read_ncmodel2d(dfile,var)
lon += lon_correction
val[val>=vmax]=np.nan
val_mean=np.ndarray((len(outlines)))
        if allstats:
val_median=np.ndarray((len(outlines)))
val_min=np.ndarray((len(outlines)))
val_max=np.ndarray((len(outlines)))
val_std=np.ndarray((len(outlines)))
for idx,d in enumerate(outlines):
ix,iy=points_in_polygon(d,lon,lat)
val_mean[idx]=np.nanmean(np.nanmean(val[iy,ix]))
            if allstats:
val_median[idx]=np.nanmedian(np.nanmedian(val[iy,ix]))
val_min[idx]=np.nanmin(np.nanmin(val[iy,ix]))
val_max[idx]=np.nanmax(np.nanmax(val[iy,ix]))
val_std[idx]=np.nanstd(val[iy,ix])
#
        if allstats:
return val_mean,val_median,val_min,val_max,val_std
else:
return val_mean
else:
dep,lat,lon,val=read_ncmodel3d(dfile,var)
lon += lon_correction
val[val>=vmax]=np.nan
val_mean=np.ndarray((len(outlines),val.shape[0]))
if stats:
val_median=np.ndarray((len(outlines),val.shape[0]))
val_min=np.ndarray((len(outlines),val.shape[0]))
val_max=np.ndarray((len(outlines),val.shape[0]))
val_std=np.ndarray((len(outlines),val.shape[0]))
for idx,d in enumerate(outlines):
ix,iy=points_in_polygon(d,lon,lat)
for k in range(val_mean.shape[1]):
val_mean[idx,k]=np.nanmean(np.nanmean(val[k,iy,ix]))
if allstats:
val_median[idx,k]=np.nanmedian(np.nanmedian(val[k,iy,ix]))
val_min[idx,k]=np.nanmin(np.nanmin(val[k,iy,ix]))
val_max[idx,k]=np.nanmax(np.nanmax(val[k,iy,ix]))
val_std[idx,k]=np.nanstd(val[k,iy,ix])
#
        if allstats:
return dep,val_mean,val_median,val_min,val_max,val_std
else:
return dep,val_mean
# ##################### qml_to_event_list #####################################
def qml_to_event_list(events_QML,to_pd=False):
print("WARNING: this function has been renamed to qml2list. This warning will be removed in v0.7.x.")
return qml2list(events_QML,to_pd=to_pd)
# modified from qml_to_event_list in obspyDMT.utils.event_handler.py
def qml2list(events_QML,to_pd=False):
"""
convert QML to event list
===PARAMETERS===
events_QML: event qml (OBSPY CATALOG object)
to_pd: convert to Pandas DataFrame object. Default: False.
====return====
events: a list of event information or a pandas dataframe object.
"""
events = []
for i in range(len(events_QML)):
try:
event_time = events_QML.events[i].preferred_origin().time or \
events_QML.events[i].origins[0].time
event_time_month = '%02i' % int(event_time.month)
event_time_day = '%02i' % int(event_time.day)
event_time_hour = '%02i' % int(event_time.hour)
event_time_minute = '%02i' % int(event_time.minute)
event_time_second = '%02i' % int(event_time.second)
if not hasattr(events_QML.events[i], 'preferred_mag'):
events_QML.events[i].preferred_mag = \
events_QML.events[i].magnitudes[0].mag
events_QML.events[i].preferred_mag_type = \
events_QML.events[i].magnitudes[0].magnitude_type
events_QML.events[i].preferred_author = 'None'
else:
if not hasattr(events_QML.events[i], 'preferred_author'):
if events_QML.events[i].preferred_magnitude().creation_info:
events_QML.events[i].preferred_author = \
events_QML.events[i].preferred_magnitude().creation_info.author
elif events_QML.events[i].magnitudes[0].creation_info:
events_QML.events[i].preferred_author = \
events_QML.events[i].magnitudes[0].creation_info.author
except Exception as error:
print(error)
continue
try:
if not events_QML.events[i].focal_mechanisms == []:
if events_QML.events[i].preferred_focal_mechanism()['moment_tensor']['tensor']:
focal_mechanism = [
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_rr'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_tt'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_pp'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_rt'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_rp'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['tensor']['m_tp']]
else:
found_foc_mech = False
for foc_mech_qml in events_QML.events[i].focal_mechanisms:
if foc_mech_qml['moment_tensor']['tensor']:
focal_mechanism = [
foc_mech_qml['moment_tensor']['tensor']['m_rr'],
foc_mech_qml['moment_tensor']['tensor']['m_tt'],
foc_mech_qml['moment_tensor']['tensor']['m_pp'],
foc_mech_qml['moment_tensor']['tensor']['m_rt'],
foc_mech_qml['moment_tensor']['tensor']['m_rp'],
foc_mech_qml['moment_tensor']['tensor']['m_tp']
]
found_foc_mech = True
break
if not found_foc_mech:
focal_mechanism = False
else:
focal_mechanism = False
except AttributeError:
print("[WARNING] focal_mechanism does not exist for " \
"event: %s -- set to False" % (i+1))
focal_mechanism = False
except TypeError:
focal_mechanism = False
except Exception as error:
print(error)
focal_mechanism = False
try:
if not events_QML.events[i].focal_mechanisms == []:
source_duration = [
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['source_time_function']['type'],
events_QML.events[i].preferred_focal_mechanism()
['moment_tensor']['source_time_function']
['duration']]
if not source_duration[1]:
source_duration = mag_duration(
mag=events_QML.events[i].preferred_mag)
else:
source_duration = mag_duration(
mag=events_QML.events[i].preferred_mag)
except AttributeError:
print("[WARNING] source duration does not exist for " \
"event: %s -- set to False" % (i+1))
source_duration = False
except TypeError:
source_duration = False
except Exception as error:
print(error)
source_duration = False
try:
events.append(OrderedDict(
[('number', i+1),
('latitude',
events_QML.events[i].preferred_origin().latitude or
events_QML.events[i].origins[0].latitude),
('longitude',
events_QML.events[i].preferred_origin().longitude or
events_QML.events[i].origins[0].longitude),
('depth',
events_QML.events[i].preferred_origin().depth/1000. or
events_QML.events[i].origins[0].depth/1000.),
('datetime', event_time),
('magnitude',
events_QML.events[i].preferred_mag),
('magnitude_type',
events_QML.events[i].preferred_mag_type),
('author',
events_QML.events[i].preferred_author),
('event_id', str(event_time.year) +
event_time_month + event_time_day + '_' +
event_time_hour + event_time_minute +
event_time_second + '.a'),
('origin_id', events_QML.events[i].preferred_origin_id or
events_QML.events[i].origins[0].resource_id.resource_id),
('focal_mechanism', focal_mechanism),
('source_duration', source_duration),
('flynn_region', 'NAN'),
]))
except Exception as error:
print(error)
continue
if to_pd:
return pd.DataFrame(events)
else:
return events
# ##################### mag_duration ###################################
# modified from the same function in obspyDMT.utils.event_handler.py
def mag_duration(mag, type_curve=1):
"""
calculate the source duration out of magnitude
type_curve can be 1, 2, 3:
1: 2005-2014
2: 1976-1990
3: 1976-2014
:param mag:
:param type_curve:
:return:
"""
if type_curve == 1:
half_duration = 0.00272*np.exp(1.134*mag)
elif type_curve == 2:
half_duration = 0.00804*np.exp(1.025*mag)
elif type_curve == 3:
half_duration = 0.00392*np.exp(1.101*mag)
else:
sys.exit('%s Type for magnitude to source duration conversion is not '
'implemented' % type_curve)
source_duration = round(half_duration, 3)*2
return ['triangle', source_duration]
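# Worked example (type_curve=1): for mag = 6.0,
# half_duration = 0.00272*exp(1.134*6.0) ~= 2.452 s,
# so mag_duration(6.0) returns ['triangle', ~4.904].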
# sta_info_from_inv(inv) is modified from noise_module (with the same name)
#Check NoisePy: https://github.com/mdenolle/NoisePy
# added functionality to process an array of inventory
def sta_info_from_inv(inv):
'''
this function outputs station info from the obspy inventory object.
PARAMETERS:
----------------------
inv: obspy inventory object
RETURNS:
----------------------
sta: station name
net: netowrk name
lon: longitude of the station
lat: latitude of the station
elv: elevation of the station
location: location code of the station
'''
# load from station inventory
sta=[]
net=[]
lon=[]
lat=[]
elv=[]
location=[]
for i in range(len(inv[0])):
sta.append(inv[0][i].code)
net.append(inv[0].code)
lon.append(inv[0][i].longitude)
lat.append(inv[0][i].latitude)
if inv[0][i][0].elevation:
elv.append(inv[0][i][0].elevation)
else:
elv.append(0.)
# print(inv[0][i])
# print(inv[0][i].location_code)
if len(inv[0][i][0].location_code)>0:
location.append(inv[0][i][0].location_code)
else:
location.append('00')
if len(inv[0])==1:
sta=sta[0]
net=net[0]
lon=lon[0]
lat=lat[0]
elv=elv[0]
location=location[0]
# print(sta,net,lon,lat,elv,location)
return sta,net,lon,lat,elv,location
def get_tt(event_lat, event_long, sta_lat, sta_long, depth_km,model="iasp91",type='first'):
# get the seismic phase arrival time of the specified earthquake at the station.
sta_t = locations2degrees(event_lat, event_long, sta_lat, sta_long)
taup = TauPyModel(model=model)
arrivals = taup.get_travel_times(source_depth_in_km=depth_km,distance_in_degree=sta_t)
if type == 'first':
tt = arrivals[0].time
ph = arrivals[0].phase
else: #get specific phase
phase_found=False
phaseall=[]
for i in range(len(arrivals)):
phaseall.append(arrivals[i].phase.name)
if arrivals[i].phase.name == type:
tt = arrivals[i].time
ph = type
phase_found=True
break
if not phase_found:
raise ValueError('phase <'+type+' > not found in '+str(phaseall))
# del arrivals
return tt,ph
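def _demo_get_tt():
    """
    Usage sketch (illustrative coordinates, not from the original package):
    first arrival for a source ~30 degrees away at 10 km depth, iasp91 model.
    """
    tt, ph = get_tt(event_lat=0., event_long=0., sta_lat=0., sta_long=30.,
                    depth_km=10.)
    print(tt)   # P-type first arrival, roughly 370 s at 30 degrees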
def resp_spectrum(source,resp_file,downsamp_freq,pre_filt=None):
'''
this function removes the instrument response using response spectrum from evalresp.
the response spectrum is evaluated based on RESP/PZ files before inverted using the obspy
function of invert_spectrum. a module of create_resp.py is provided in directory of 'additional_modules'
to create the response spectrum
PARAMETERS:
----------------------
source: obspy stream object of targeted noise data
resp_file: numpy data file of response spectrum
downsamp_freq: sampling rate of the source data
pre_filt: pre-defined filter parameters
RETURNS:
----------------------
source: obspy stream object of noise data with instrument response removed
'''
#--------resp_file is the inverted spectrum response---------
respz = np.load(resp_file)
nrespz= respz[1][:]
spec_freq = max(respz[0])
#-------on current trace----------
nfft = _npts2nfft(source[0].stats.npts)
sps = int(source[0].stats.sampling_rate)
#---------do the interpolation if needed--------
if spec_freq < 0.5*sps:
raise ValueError('spectrum file has peak freq smaller than the data, abort!')
else:
indx = np.where(respz[0]<=0.5*sps)
nfreq = np.linspace(0,0.5*sps,nfft//2+1)
nrespz= np.interp(nfreq,np.real(respz[0][indx]),respz[1][indx])
#----do interpolation if necessary-----
source_spect = np.fft.rfft(source[0].data,n=nfft)
#-----nrespz is inversed (water-leveled) spectrum-----
source_spect *= nrespz
source[0].data = np.fft.irfft(source_spect)[0:source[0].stats.npts]
if pre_filt is not None:
source[0].data = np.float32(bandpass(source[0].data,pre_filt[0],pre_filt[-1],df=sps,corners=4,zerophase=True))
return source
def stats2inv(stats,locs=None,format=None):
'''
this function creates inventory given the stats parameters in an obspy stream or a station list.
PARAMETERS:
------------------------
stats: obspy trace stats object containing all station header info
locs: panda data frame of the station list. it is needed for converting miniseed files into ASDF
format: format of the original data that the obspy trace was built from. if not specified, it will
read the format by the Trace._format attribute. 'sac' format will be used if there is a sac
dictionary in stats.
RETURNS:
------------------------
inv: obspy inventory object of all station info to be used later
'''
staxml = False
respdir = "."
if format is None:
input_fmt = stats._format.lower()
if 'sac' in list(stats.keys()):
input_fmt = 'sac'
else:
input_fmt = format
if staxml:
if not respdir:
raise ValueError('Abort! staxml is selected but no directory is given to access the files')
else:
invfile = glob.glob(os.path.join(respdir,'*'+stats.station+'*'))
if os.path.isfile(str(invfile)):
inv = obspy.read_inventory(invfile)
return inv
inv = Inventory(networks=[],source="homegrown")
if input_fmt=='sac':
if 'sac' not in list(stats.keys()):
raise ValueError('Abort! sac key is not in stats for input format: sac.')
else:
net = Network(
# This is the network code according to the SEED standard.
code=stats.network,
stations=[],
description="created from SAC and resp files",
start_date=stats.starttime)
sta = Station(
# This is the station code according to the SEED standard.
code=stats.station,
latitude=stats.sac["stla"],
longitude=stats.sac["stlo"],
elevation=stats.sac["stel"],
creation_date=stats.starttime,
site=Site(name="First station"))
cha = Channel(
# This is the channel code according to the SEED standard.
code=stats.channel,
# This is the location code according to the SEED standard.
location_code=stats.location,
# Note that these coordinates can differ from the station coordinates.
latitude=stats.sac["stla"],
longitude=stats.sac["stlo"],
elevation=stats.sac["stel"],
depth=-stats.sac["stel"],
azimuth=stats.sac["cmpaz"],
dip=stats.sac["cmpinc"],
sample_rate=stats.sampling_rate)
else:# input_fmt == 'mseed':
if locs is not None:
ista=locs[locs['station']==stats.station].index.values.astype('int64')[0]
net = Network(
# This is the network code according to the SEED standard.
code=locs.iloc[ista]["network"],
stations=[],
description="created from SAC and resp files",
start_date=stats.starttime)
sta = Station(
# This is the station code according to the SEED standard.
code=locs.iloc[ista]["station"],
latitude=locs.iloc[ista]["latitude"],
longitude=locs.iloc[ista]["longitude"],
elevation=locs.iloc[ista]["elevation"],
creation_date=stats.starttime,
site=Site(name="First station"))
cha = Channel(
code=stats.channel,
location_code=stats.location,
latitude=locs.iloc[ista]["latitude"],
longitude=locs.iloc[ista]["longitude"],
elevation=locs.iloc[ista]["elevation"],
depth=-locs.iloc[ista]["elevation"],
azimuth=0,
dip=0,
sample_rate=stats.sampling_rate)
else:
raise ValueError('locs has to be specified for miniseed data and other formats.')
response = obspy.core.inventory.response.Response()
# Now tie it all together.
cha.response = response
sta.channels.append(cha)
net.stations.append(sta)
inv.networks.append(net)
return inv
# split_datetimestr(inv) is modified from NoisePy.noise_module.get_event_list()
#Check NoisePy: https://github.com/mdenolle/NoisePy
def split_datetimestr(dtstr1,dtstr2,inc_hours):
'''
this function calculates the datetime list between datetime1 and datetime2 by
    increment of inc_hours, in the format of %Y_%m_%d_%H_%M_%S
PARAMETERS:
----------------
dtstr1: string of the starting time -> 2010_01_01_0_0
dtstr2: string of the ending time -> 2010_10_11_0_0
inc_hours: integer of incremental hours
RETURNS:
----------------
dtlist: a numpy character list
'''
date1=dtstr1.split('_')
date2=dtstr2.split('_')
y1=int(date1[0]);m1=int(date1[1]);d1=int(date1[2])
h1=int(date1[3]);mm1=int(date1[4]);mn1=int(date1[5])
y2=int(date2[0]);m2=int(date2[1]);d2=int(date2[2])
h2=int(date2[3]);mm2=int(date2[4]);mn2=int(date2[5])
d1=datetime.datetime(y1,m1,d1,h1,mm1,mn1)
d2=datetime.datetime(y2,m2,d2,h2,mm2,mn2)
dt=datetime.timedelta(hours=inc_hours)
dtlist = []
while(d1<d2):
dtlist.append(d1.strftime('%Y_%m_%d_%H_%M_%S'))
d1+=dt
dtlist.append(d2.strftime('%Y_%m_%d_%H_%M_%S'))
return dtlist
#Adapted from NoisePy function with the same name.
@jit('float32[:](float32[:],float32)')
def segment_interpolate(sig1,nfric):
'''
    this function interpolates the data to ensure all points are located on integer
    multiples of the sampling interval (e.g., starttime = 00:00:00.015, delta = 0.05.)
PARAMETERS:
----------------------
sig1: seismic recordings in a 1D array
nfric: the amount of time difference between the point and the adjacent assumed samples
RETURNS:
----------------------
sig2: interpolated seismic recordings on the sampling points
'''
npts = len(sig1)
sig2 = np.zeros(npts,dtype=np.float32)
    #----instead of shifting, do an interpolation------
for ii in range(npts):
#----deal with edges-----
if ii==0 or ii==npts-1:
sig2[ii]=sig1[ii]
else:
#------interpolate using a hat function------
sig2[ii]=(1-nfric)*sig1[ii+1]+nfric*sig1[ii]
return sig2
#
def get_tracetag(tr):
"""
Returns the standard OBSPY format tag for seismic trace.
Parameter
----------
tr::class:`~obspy.core.Trace`
Seismic trace.
Return
----------
tag::String
Tag for the input trace.
"""
tag=''
if not isinstance(tr, Trace):
raise(Exception("Error get_tracetag() - "
+ str(tr)+" is not a Trace object"))
if len(tr.stats.location) == 0:
tlocation='00'
else:
tlocation=tr.stats.location
tag=tr.stats.channel.lower()+'_'+tlocation.lower()
return tag
# Modified from <NAME>. Modified by Xiaotao to get filter frequencies from the arguments
# 1. added titles for multiple plots
# 2. determine freqmax as the Nyquist frequency, if not specified
# 3. Added mode with option to plot overlapping figures.
def plot_trace(tr_list,freq=[],size=(10,9),ylabels=[],datalabels=[],\
title=[],outfile='test.ps',xlimit=[],subplotpar=[], \
mode="subplot",spacing=2.0,colors=[],verbose=False):
"""
mode: subplot, overlap, or gather. In gather mode, traces will be offset and normalized.
"""
plt.figure(figsize=size)
ntr=len(tr_list)
if len(subplotpar)==0 and mode=="subplot":
subplotpar=(ntr,1)
myymin=[]
myymax=[]
for itr,tr in enumerate(tr_list,1):
if isinstance(tr,obspy.core.stream.Stream) or isinstance(tr,list):
if len(tr) >0:
tc=tr[0].copy()
else:
continue
else:
tc=tr.copy()
tt=tc.times()
if len(xlimit)==0:
xlimit=[np.min(tt),np.max(tt)]
imin = np.searchsorted(tt,xlimit[0],side="left")
imax = np.searchsorted(tt,xlimit[1],side="left")
if len(freq)>0:
if verbose:print("station %s.%s, filtered at [%6.3f, %6.3f]" % (tc.stats.network,
tc.stats.station,freq[0],freq[1]))
tc.filter('bandpass',freqmin=freq[0],freqmax=freq[1],zerophase=True)
else:
if verbose:print("station %s.%s" % (tc.stats.network,tc.stats.station))
if mode=="subplot":
ax=plt.subplot(subplotpar[0],subplotpar[1],itr)
plt.tight_layout(pad=spacing)
if len(colors)==0:
plt.plot(tt,tc.data)
elif len(colors)==1:
plt.plot(tt,tc.data,colors[0])
else:
plt.plot(tt,tc.data,colors[itr-1])
plt.xlabel("time (s)")
ax.ticklabel_format(axis='x',style='plain')
if np.max(np.abs(tc.data[imin:imax])) >= 1e+4 or \
np.max(np.abs(tc.data[imin:imax])) <= 1e-4:
ax.ticklabel_format(axis='both',style='sci')
if len(ylabels)>0:
plt.ylabel(ylabels[itr-1])
if len(title)>0:
plt.title(title[itr-1])
if len(xlimit)>0:
plt.xlim(xlimit)
plt.ylim(0.9*np.min(tc.data[imin:imax]),1.1*np.max(tc.data[imin:imax]))
if len(freq)>0:
plt.text(np.mean(xlimit),0.9*np.max(tc.data[imin:imax]),\
"["+str(freq[0])+", "+str(freq[1])+"] Hz", \
horizontalalignment='center',verticalalignment='center',fontsize=12)
elif mode=="overlap":
if itr==1:ax=plt.subplot(1,1,1)
if len(colors)==0:
plt.plot(tt,tc.data)
elif len(colors)==1:
plt.plot(tt,tc.data,colors[0])
else:
plt.plot(tt,tc.data,colors[itr-1])
plt.xlabel("time (s)")
myymin.append(0.9*np.min(tc.data[imin:imax]))
myymax.append(1.1*np.max(tc.data[imin:imax]))
if itr==ntr:
ax.ticklabel_format(axis='x',style='plain')
if len(datalabels)>0: ax.legend(datalabels)
if len(ylabels)>0:
plt.ylabel(ylabels[0])
if len(title)>0:
plt.title(title)
if len(xlimit)>0:
plt.xlim(xlimit)
plt.ylim(np.min(myymin),np.max(myymax))
if len(freq)>0:
plt.text(np.mean(xlimit),0.85*np.max(myymax),"["+str(freq[0])+", "+str(freq[1])+"] Hz",\
horizontalalignment='center',verticalalignment='center',fontsize=14)
elif mode=="gather":
if itr==1:ax=plt.subplot(1,1,1)
if len(colors)==0:
plt.plot(tt,itr-1+0.5*tc.data/np.max(np.abs(tc.data)))
elif len(colors)==1:
plt.plot(tt,itr-1+0.5*tc.data/np.max(np.abs(tc.data)),colors[0])
else:
plt.plot(tt,itr-1+0.5*tc.data/np.max(np.abs(tc.data)),colors[itr-1])
plt.xlabel("time (s)")
plt.text(xlimit[0]+10,itr-1+0.2,tc.stats.network+"."+tc.stats.station,
horizontalalignment='left',verticalalignment='center',fontsize=11)
if itr==ntr:
ax.ticklabel_format(axis='x',style='plain')
if len(datalabels)>0: ax.legend(datalabels)
if len(ylabels)>0:
plt.ylabel(ylabels[0])
if len(title)>0:
plt.title(title)
if len(xlimit)>0:
plt.xlim(xlimit)
plt.ylim([-0.7,ntr-0.3])
                if len(freq)>0:
                    # note: myymax is not populated in gather mode, so anchor the label near the top trace
                    plt.text(np.mean(xlimit),ntr-0.5,"["+str(freq[0])+", "+str(freq[1])+"] Hz",\
                             horizontalalignment='center',verticalalignment='center',fontsize=14)
else:
raise ValueError("mode: %s is not recoganized. Can ONLY be: subplot, overlap, or gather."%(mode))
plt.savefig(outfile,orientation='landscape')
plt.show()
plt.close()
def check_overlap(t1,t2,error=0):
"""
    Find the common elements of two time arrays within a tolerance.
    t1,t2: list or numpy arrays.
    error: measurement error (tolerance) used in the comparison. default is 0.
    Returns the indices of the matching elements in t1 and t2.
"""
ind1=[]
ind2=[]
if isinstance(t1,list):t1=np.array(t1)
if isinstance(t2,list):t2=np.array(t2)
for i in range(len(t1)):
f1=t1[i]
ind_temp=np.where(np.abs(t2-f1)<=error)[0]
if len(ind_temp)>0:
ind1.append(i)
ind2.append(ind_temp[0])
return ind1,ind2
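# Illustrative usage sketch (not part of the original module): match
# timestamps from two catalogs within a 0.5 s tolerance.
def _example_check_overlap():
    t_a = [10.0, 20.0, 30.0, 40.0]
    t_b = [10.2, 29.8, 55.0]
    ia, ib = check_overlap(t_a, t_b, error=0.5)
    print(ia, ib)  # -> [0, 2] [0, 1]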
#Modified from noisepy function cut_trace_make_statis().
def slicing_trace(source,win_len_secs,step_secs=None,taper_frac=0.02):
'''
    this function cuts continuous noise data into user-defined segments, estimates the statistics of
    each segment, and keeps the timestamp of each segment for later use.
PARAMETERS:
----------------------
    source: obspy stream object
    win_len_secs: length of the slicing segments in seconds
    step_secs: step of slicing in seconds. When None (default) or 0.0, only returns one window.
    taper_frac: taper fraction (0-1) applied to each segment. Default is 0.02.
RETURNS:
----------------------
trace_stdS: standard deviation of the noise amplitude of each segment
dataS_t: timestamps of each segment
dataS: 2D matrix of the segmented data
'''
    if isinstance(source,Trace):source=Stream([source])
    # statistic to detect segments that may be associated with earthquakes
    all_madS = mad(source[0].data) # median absolute deviation over all noise window
    all_stdS = np.std(source[0].data) # standard deviation over all noise window
    if all_madS==0 or all_stdS==0 or np.isnan(all_madS) or np.isnan(all_stdS):
        print("return empty! madS or stdS equals 0 for %s" % source)
        return [],[],[]
# useful parameters for trace sliding
sps = source[0].stats.sampling_rate
starttime = source[0].stats.starttime-obspy.UTCDateTime(1970,1,1)
duration = source[0].stats.endtime-obspy.UTCDateTime(1970,1,1) - starttime
if duration < win_len_secs:
print("return empty! data duration is < slice length." % source)
return [],[],[]
    if step_secs is None or step_secs == 0.0:
        nseg=1
        npts_step = 0
        step_secs = 0.0 # keep the timestamp computation below well-defined
else:
nseg = int(np.floor((duration-win_len_secs)/step_secs))
npts_step = int(step_secs*sps)
# initialize variables
npts = int(win_len_secs*sps)
trace_stdS = np.zeros(nseg,dtype=np.float32)
dataS = np.zeros(shape=(nseg,npts),dtype=np.float32)
    dataS_t = np.zeros(nseg,dtype=np.float64)
print('slicing trace into ['+str(nseg)+'] segments.')
indx1 = 0
for iseg in range(nseg):
indx2 = indx1+npts
dataS[iseg] = source[0].data[indx1:indx2]
trace_stdS[iseg] = (np.max(np.abs(dataS[iseg]))/all_stdS)
dataS_t[iseg] = starttime+step_secs*iseg
indx1 += npts_step
# 2D array processing
dataS = demean(dataS)
dataS = detrend(dataS)
dataS = taper(dataS,fraction=taper_frac)
return trace_stdS,dataS_t,dataS
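# Illustrative usage sketch (assumes obspy's Trace/Stream are imported in this
# module): slice a 10-minute synthetic trace into 60 s windows with a 30 s step.
def _example_slicing_trace():
    tr = obspy.Trace(data=np.random.randn(12000).astype(np.float32))
    tr.stats.sampling_rate = 20.0
    stds, seg_times, segs = slicing_trace(Stream([tr]), win_len_secs=60, step_secs=30)
    print(segs.shape)  # (17, 1200): 17 segments of 1200 samples each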
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
# Modified by Xiaotao to return window starting indices and the option of forcing to slide through full length.
def sliding_window(a, ws, ss=None, wind=None, getindex=False,full_length=False,verbose=False):
"""
Function to split a data array into overlapping, possibly tapered sub-windows.
Parameters
----------
a : :class:`~numpy.ndarray`
1D array of data to split
ws : int
Window size in samples
ss : int
Step size in samples. If not provided, window and step size
are equal.
wind : :class:`~numpy.ndarray`
1d array to specify the window used to apply taper or None (default).
getindex : bool
Save/return the start index for each window if True.
full_length : bool
        Add an extra window to include any leftover samples, so that the sliding
        covers the entire trace. This is done by measuring one window starting
        from the end backward. When False, this function skips the trailing
        samples if they are shorter than the window size.
Returns
-------
out : :class:`~numpy.ndarray`
1D array of windowed data
nd : int
Number of windows
idx : :class:`~numpy.ndarray`
(Optional) The starting indices of the windows, with the size of [nd,1]
"""
if full_length and verbose:
print("WARNING: Force slide to the full length, the last window measures backward from the end.")
    if ws > len(a):
        raise(Exception("Error sliding_window() - window size is bigger than data length."))
if ss is None:
# no step size was provided. Return non-overlapping windows
ss = ws
# Calculate the number of windows to return, ignoring leftover samples, and
# allocate memory to contain the samples
nd = len(a) // ss
tailcare=False
if (nd-1)*ss + ws > len(a):
if full_length:
tailcare = True
else:
nd = nd - 1
elif (nd-1)*ss + ws < len(a) and full_length:
tailcare = True
nd = nd + 1
out = np.ndarray((nd, ws), dtype=a.dtype)
idx = np.ndarray((nd,),dtype=int)
if nd==0:
if wind is not None:
out = a * wind
else:
out = a
idx=0
else:
for i in range(nd):
# "slide" the window along the samples
start = i * ss
stop = start + ws
# print(i,start,stop,len(a))
# print(i,nd)
if stop > len(a) and tailcare:
stop = len(a)
start = stop - ws
# print(i,start,stop)
if stop <= len(a):
if wind is not None:
out[i] = a[start: stop] * wind
else:
out[i] = a[start: stop]
idx[i] = start
# idx[i][1] = stop
if getindex:
return out,nd,idx
else:
return out, nd
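# Illustrative usage sketch: slice a short array into 50%-overlapping windows
# and recover the starting index of each window.
def _example_sliding_window():
    a = np.arange(10, dtype=np.float64)
    out, nd, idx = sliding_window(a, ws=4, ss=2, getindex=True)
    print(nd, idx)    # 4 windows starting at samples [0 2 4 6]
    print(out.shape)  # (4, 4)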
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def calculate_windowed_fft(a, fs, ws, ss=None, wind=None,getindex=False,full_length=False):
"""
Calculates windowed Fourier transform
Parameters
----------
a : :class:`~numpy.ndarray`
1d array
fs : int
sampling rate (samples per second)
ws : int
Window size, in number of samples
ss : int
Step size, or number of samples until next window
wind : :class:`~numpy.ndarray`
1d array to specify the window used to apply taper or None (default).
getindex : bool
Save/return the start index for each window if True.
full_length : bool
        Add an extra window to include any leftover samples, so that the sliding
        covers the entire trace. This is done by measuring one window starting
        from the end backward. When False, this function skips the trailing
        samples if they are shorter than the window size.
Returns
-------
ft : :class:`~numpy.ndarray`
Fourier transform of trace
f : :class:`~numpy.ndarray`
Frequency axis in Hz
idx : :class:`~numpy.ndarray`
(Optional) The starting indices of the windows, with the size of [nd,1]
"""
n2 = _npow2(ws)
f = np.fft.rfftfreq(n2,1/fs)
# Extract sliding windows
if getindex:
tr, nd,idx = sliding_window(a, ws, ss, wind=wind,getindex=True,
full_length=full_length)
else:
tr, nd = sliding_window(a, ws, ss,wind=wind,
full_length=full_length)
# Fourier transform
ft = np.fft.fft(tr, n=n2)
if getindex:
return ft,f,idx
else:
return ft, f
def psd(d,s,axis=-1,db=False):
"""
Compute power spectral density. The power spectrum is normalized by
frequency resolution.
====PARAMETERS====
d: numpy ndarray containing the data.
s: sampling frequency (samples per second)
    axis: axis along which to compute the PSD. default is the last dimension (-1).
====RETURNS=======
f: frequency array
psd: power spectral density
"""
if isinstance(d,list):d=np.array(d)
if d.ndim >2:
        print('data has >2 dimensions; skipping demean and detrend.')
else:
d=detrend(demean(d))
ft=fft(d,axis=axis)
psd=np.square(np.abs(ft))/s
N=int(psd.shape[-1]/2)
f=np.linspace(0, s/2, N)
if d.ndim ==1:
psd=psd[:N]
elif d.ndim==2:
psd=psd[:,:N]
if db:
psd=10*np.log10(np.abs(psd))
return f,psd
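# Illustrative usage sketch (assumes an FFT function, e.g. scipy's, is
# imported as `fft` in this module): the PSD of a 5 Hz sine sampled at
# 100 Hz should peak near 5 Hz.
def _example_psd():
    fs = 100.0
    t = np.arange(0, 10, 1 / fs)
    x = np.sin(2 * np.pi * 5.0 * t)
    f, p = psd(x, fs)
    print("peak at %.2f Hz" % f[np.argmax(p)])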
def plot_slidingwindows(duration=3600*6,fs=20,window=7200,
overlaps=[None,0.1,0.1,0.2,0.2,0.3],
tapers=[None,None,0.05,0.05,0.1,0.1],
full_length=True,size=(12,12),save=False,
format='png'):
"""
    This function plots tapered sliding windows for illustration purposes.
Parameters
----------
duration: length of the demo data in seconds.
fs: sampling rate of the data, used to get time information
window: the window length you want to test.
overlaps: an array specifying the series of window overlaps (0.0-1.0) for test.
tapers: window ends will be tapered.
"""
data=np.zeros((duration*fs,))
t=np.arange(len(data))/fs
ws=int(window*fs)
plt.figure(figsize=size)
print("connecting locations")
print("start end")
colorlist=['k','b','g','c','y','r','m']
for i in range(len(overlaps)):
if overlaps[i] is None:
ol=0
else:
ol=overlaps[i]
if tapers[i] is None:
tp=0
else:
tp=tapers[i]
tps = int(0.5*window*ol*fs) #location where the two windows connect.
step = int(window*(1-ol)*fs)
wind=tukey(ws,2*tp)
print(tps/fs,window - tps/fs)
dout,nd,idx=sliding_window(data,ws,ss=step,getindex=True,
full_length=full_length,verbose=False)
ax=plt.subplot(len(tapers),1,i+1)
if len(idx) > len(colorlist):
windcolor=colorlist*(len(idx)//len(colorlist) + 1)
else:
windcolor=colorlist
for j in range(len(idx)):
plt.tight_layout(pad=1)
plt.plot(t[np.arange(ws)+idx[j]],wind,windcolor[j])
if j >0 and j+1 < len(idx):
plt.plot(t[tps+j*step],1,'og')
plt.plot(t[int(ol*window*fs)+j*step],1,'^r')
plt.title("overlap: "+str(ol)+", one-side taper: "+str(tp))
plt.xlim((np.min(t),np.max(t)))
ax.legend(['tukey','tukey','connection','overlap'])
if save:
plt.savefig("slidingwindows_illustration."+format)
else:
plt.show()
# def smooth(data, np, poly=0, axis=0):
# return savgol_filter(data, np, poly, axis=axis, mode='wrap')
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def smooth(data, nd, axis=0):
"""
Function to smooth power spectral density functions from the convolution
of a boxcar function with the PSD
Parameters
----------
data : :class:`~numpy.ndarray`
Real-valued array to smooth (PSD)
nd : int
Number of samples over which to smooth
axis : int
axis over which to perform the smoothing
Returns
-------
filt : :class:`~numpy.ndarray`, optional
Filtered data
"""
if np.any(data):
if data.ndim > 1:
filt = np.zeros(data.shape)
for i in range(data.shape[::-1][axis]):
if axis == 0:
filt[:, i] = np.convolve(
data[:, i], np.ones((nd,))/nd, mode='same')
elif axis == 1:
filt[i, :] = np.convolve(
data[i, :], np.ones((nd,))/nd, mode='same')
else:
filt = np.convolve(data, np.ones((nd,))/nd, mode='same')
return filt
else:
return None
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def admittance(Gxy, Gxx):
"""
Calculates admittance between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Gxx : :class:`~numpy.ndarray`
Power spectral density function of `x`
Returns
-------
: :class:`~numpy.ndarray`, optional
Admittance between `x` and `y`
"""
if np.any(Gxy) and np.any(Gxx):
return np.abs(Gxy)/Gxx
else:
return None
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def coherence(Gxy, Gxx, Gyy):
"""
Calculates coherence between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Gxx : :class:`~numpy.ndarray`
Power spectral density function of `x`
Gyy : :class:`~numpy.ndarray`
Power spectral density function of `y`
Returns
-------
: :class:`~numpy.ndarray`, optional
Coherence between `x` and `y`
"""
    if np.any(Gxy) and np.any(Gxx) and np.any(Gyy):
return np.abs(Gxy)**2/(Gxx*Gyy)
else:
return None
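# Illustrative usage sketch: estimate auto/cross spectra from windowed FFTs
# and evaluate the coherence between two correlated channels.
def _example_coherence():
    fs, ws = 20.0, 256
    x = np.random.randn(4096)
    y = 0.5 * x + 0.1 * np.random.randn(4096)
    ftx, _ = calculate_windowed_fft(x, fs, ws)
    fty, _ = calculate_windowed_fft(y, fs, ws)
    Gxx = np.mean(np.abs(ftx) ** 2, axis=0)
    Gyy = np.mean(np.abs(fty) ** 2, axis=0)
    Gxy = np.mean(np.conj(ftx) * fty, axis=0)
    print(np.max(coherence(Gxy, Gxx, Gyy)))  # close to 1 where y tracks x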
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def phase(Gxy):
"""
Calculates phase angle between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Returns
-------
: :class:`~numpy.ndarray`, optional
Phase angle between `x` and `y`
"""
if np.any(Gxy):
return np.angle(Gxy)
else:
return None
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def rotate_dir(tr1, tr2, direc):
d = -direc*np.pi/180.+np.pi/2.
rot_mat = np.array([[np.cos(d), -np.sin(d)],
[np.sin(d), np.cos(d)]])
v12 = np.array([tr2, tr1])
vxy = np.tensordot(rot_mat, v12, axes=1)
tr_2 = vxy[0, :]
tr_1 = vxy[1, :]
return tr_1
# modified from the same functions as in: https://github.com/nfsi-canada/OBStools/blob/master/obstools/atacr/utils.py
def ftest(res1, pars1, res2, pars2):
from scipy.stats import f as f_dist
N1 = len(res1)
N2 = len(res2)
dof1 = N1 - pars1
dof2 = N2 - pars2
Ea_1 = np.sum(res1**2)
Ea_2 = np.sum(res2**2)
Fobs = (Ea_1/dof1)/(Ea_2/dof2)
P = 1. - (f_dist.cdf(Fobs, dof1, dof2) - f_dist.cdf(1./Fobs, dof1, dof2))
return P
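# Illustrative usage sketch: compare residual variances from two fits; a
# small P suggests the variances differ significantly.
def _example_ftest():
    res1 = np.random.randn(100)
    res2 = 0.5 * np.random.randn(100)
    print(ftest(res1, 2, res2, 2))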
def _npow2(x):
return 1 if x == 0 else 2**(x-1).bit_length()
def nextpow2(x):
"""
Returns the next power of 2 of x.
"""
return int(np.ceil(np.log2(np.abs(x))))
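# Note the distinction between the two helpers above: _npow2 returns the
# next power-of-two value, while nextpow2 returns its exponent.
def _example_pow2():
    print(_npow2(500))    # 512
    print(nextpow2(500))  # 9, since 2**9 == 512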
#save trace to files.
def save2asdf(fname,data,tag,sta_inv=None,group='waveforms',para=None,event=None):
"""
A wrapper to save obspy stream to asdf file.
Parameters
----------
fname : string
Output ASDF file name, with *.h5 extension.
data :: class `~obspy.core.Stream` or class `~numpy.ndarray`
Obspy Stream or numpy.ndarray object. For stream, all traces should belong to one single station,
particularly when sta_inv is provided.
tag :: string list
List of tags for each trace in the `data` object.
sta_inv : station inventory
        Station XML (obspy station inventory).
group : string
Group to save the data. Available options include 'waveforms', 'auxiliary'
para : dictionary
A dictionary to store saving parameters.
"""
if group == 'waveforms':
if len(data) != len(tag):
raise(Exception('save2asdf: the stream and tag list should have the same length.'))
if not os.path.isfile(fname):
ds=pyasdf.ASDFDataSet(fname,mpi=False,compression="gzip-3",mode='w')
if event is not None:
ds.add_quakeml(event)
else:
ds=pyasdf.ASDFDataSet(fname,mpi=False,compression="gzip-3",mode='a')
#save
if sta_inv is not None:
ds.add_stationxml(sta_inv)
if group == 'waveforms':
for i in range(len(data)):
ds.add_waveforms(data[i],tag=tag[i])
elif group == 'auxiliary':
try:
data_type=para['data_type']
data_path=para['data_path']
parameters = para['parameters']
except Exception as e:
            raise(Exception('save2asdf: '+str(e)))
try:
provenance_id=para['provenance_id']
except Exception as e:
provenance_id=None
ds.add_auxiliary_data(data,data_type,data_path,parameters=parameters,
provenance_id=provenance_id)
def get_cc(s1,s_ref):
# returns the correlation coefficient between waveforms in s1 against reference
# waveform s_ref.
#
cc=np.zeros(s1.shape[0])
s_ref_norm = np.linalg.norm(s_ref)
for i in range(s1.shape[0]):
cc[i]=np.sum(np.multiply(s1[i,:],s_ref))/np.linalg.norm(s1[i,:])/s_ref_norm
return cc
@jit(nopython = True)
def moving_ave(A,N):
'''
this Numba compiled function does running smooth average for an array.
PARAMETERS:
---------------------
A: 1-D array of data to be smoothed
N: integer, it defines the half window length to smooth
RETURNS:
---------------------
B: 1-D array with smoothed data
'''
A = np.concatenate((A[:N],A,A[-N:]),axis=0)
B = np.zeros(A.shape,A.dtype)
tmp=0.
for pos in range(N,A.size-N):
# do summing only once
if pos==N:
for i in range(-N,N+1):
tmp+=A[pos+i]
else:
tmp=tmp-A[pos-N-1]+A[pos+N]
B[pos]=tmp/(2*N+1)
if B[pos]==0:
B[pos]=1
return B[N:-N]
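# Illustrative usage sketch: an 11-point running mean (half window N=5).
# Note the edges are padded by repeating the first/last N samples.
def _example_moving_ave():
    x = np.arange(100, dtype=np.float64) + np.random.randn(100)
    x_smooth = moving_ave(x, 5)
    print(x.shape, x_smooth.shape)  # shapes match: (100,) (100,)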
def ftn(data,dt,fl,fh,df=None,taper_frac=None,taper_maxlen=20,max_abs=2,
inc_type='linear',nf=100):
"""
    Conduct frequency-time normalization, based on the method in Shen, BSSA, 2012. This function
    was written based on the MATLAB version, obtained from Dr. <NAME> at UMass Amherst.
============PARAMETERS===============
data: Numpy ndarray of the data, maximum dimension=2.
dt: sample interval in time (s)
fl: lowest frequency.
fh: highest frequency.
df: frequency interval in narrow band filtering, default is df=fl/4
taper_frac: fraction 0-1 for tapering. ignore tapering if None.
    taper_maxlen: maximum length in number of points for tapering (ignored if taper_frac is None). Default 20.
max_abs: maximum absolute value of the data after FTN. Default 2.
inc_type: frequency increment type, 'linear' [default] or 'log'. when 'linear', df will be used
and when 'log', nf will be used.
nf: number of frequencies for 'log' type increment. default 100.
============RETURNS================
dftn: data after FTN.
====================================
Ref: Shen et al. (2012) An Improved Method to Extract Very-Broadband Empirical Green’s
Functions from Ambient Seismic Noise, BSSA, doi: 10.1785/0120120023
"""
if fh>0.5/dt:
raise ValueError('upper bound of frequency CANNOT be larger than Nyquist frequency.')
if inc_type=="log":
dinc=1 - 1/np.geomspace(1,100,nf)
dinc=np.append(dinc,1)
freqs=fl + dinc*(fh-fl)
elif inc_type=="linear":
if df is None: df=fl/4
freqs=np.arange(fl,fh+0.5*df,df)
if freqs[-1]>0.5/dt:freqs[-1]=0.5/dt
ncorners=4
if taper_frac is None:
d=data
else:
d=taper(data,fraction=taper_frac,maxlen=taper_maxlen)
dftn=np.zeros(d.shape,dtype=d.dtype)
if d.ndim == 1:
for i in range(len(freqs)-1):
dfilter=bandpass(d,freqs[i],freqs[i+1],1/dt,corners=ncorners, zerophase=True)
env=np.abs(hilbert(dfilter))
dftn += np.divide(dfilter,env)
dftn /= np.sqrt(len(freqs)-1)
#normalization
idx=np.where(np.abs(dftn)>max_abs)[0]
if len(idx)>0: dftn[idx]=0.0
elif d.ndim==2:
for k in range(d.shape[0]):
for i in range(len(freqs)-1):
dfilter=bandpass(d[k,:],freqs[i],freqs[i+1],1/dt,corners=ncorners, zerophase=True)
env=np.abs(hilbert(dfilter))
dftn[k,:] += np.divide(dfilter,env)
dftn[k,:] /= np.sqrt(len(freqs)-1)
#normalization
idx=np.where(np.abs(dftn[k,:])>max_abs)[0]
if len(idx)>0: dftn[k,idx]=0.0
else:
raise ValueError('Dimension %d is higher than allowed 2.'%(d.ndim))
#taper
if taper_frac is not None:
dftn=taper(dftn,fraction=taper_frac,maxlen=taper_maxlen)
return dftn
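# Illustrative usage sketch (assumes bandpass and hilbert are imported in this
# module): frequency-time normalize a trace between 0.1 and 1 Hz.
def _example_ftn():
    d = np.random.randn(12000)
    d_ftn = ftn(d, dt=0.05, fl=0.1, fh=1.0, taper_frac=0.05)
    print(np.max(np.abs(d_ftn)))  # bounded by max_abs (default 2)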
def mad(arr):
"""
Median Absolute Deviation: MAD = median(|Xi- median(X)|)
PARAMETERS:
-------------------
arr: numpy.ndarray, seismic trace data array
RETURNS:
data: Median Absolute Deviation of data
"""
if not np.ma.is_masked(arr):
med = np.median(arr)
data = np.median(np.abs(arr - med))
else:
med = np.ma.median(arr)
data = np.ma.median(np.ma.abs(arr-med))
return data
def detrend(data):
'''
    this function removes the signal trend based on QR decomposition
NOTE: QR is a lot faster than the least square inversion used by
scipy (also in obspy).
PARAMETERS:
---------------------
data: input data matrix
RETURNS:
---------------------
data: data matrix with trend removed
'''
#ndata = np.zeros(shape=data.shape,dtype=data.dtype)
if data.ndim == 1:
npts = data.shape[0]
X = np.ones((npts,2))
X[:,0] = np.arange(0,npts)/npts
Q,R = np.linalg.qr(X)
rq = np.dot(np.linalg.inv(R),Q.transpose())
coeff = np.dot(rq,data)
data = data-np.dot(X,coeff)
elif data.ndim == 2:
npts = data.shape[1]
X = np.ones((npts,2))
X[:,0] = np.arange(0,npts)/npts
Q,R = np.linalg.qr(X)
rq = np.dot(np.linalg.inv(R),Q.transpose())
for ii in range(data.shape[0]):
coeff = np.dot(rq,data[ii])
data[ii] = data[ii] - np.dot(X,coeff)
return data
def demean(data,axis=-1):
'''
    this function removes the mean of the signal
PARAMETERS:
---------------------
data: input data matrix
axis: axis to operate.
RETURNS:
---------------------
data: data matrix with mean removed
'''
#ndata = np.zeros(shape=data.shape,dtype=data.dtype)
if data.ndim == 1:
data = data-np.mean(data)
elif data.ndim == 2:
m=np.mean(data,axis=axis)
for ii in range(data.shape[0]):
if axis==-1:
data[ii] = data[ii]-m[ii]
else:
data[:,ii] = data[:,ii]-m[ii]
return data
def taper(data,fraction=0.05,maxlen=20):
'''
this function applies a cosine taper using obspy functions
PARAMETERS:
---------------------
data: input data matrix
RETURNS:
---------------------
data: data matrix with taper applied
'''
#ndata = np.zeros(shape=data.shape,dtype=data.dtype)
if data.ndim == 1:
npts = data.shape[0]
# window length
wlen = int(npts*fraction)
if wlen>maxlen:wlen = maxlen
# taper values
func = _get_function_from_entry_point('taper', 'hann')
if 2*wlen == npts:
taper_sides = func(2*wlen)
else:
taper_sides = func(2*wlen+1)
# taper window
win = np.hstack((taper_sides[:wlen], np.ones(npts-2*wlen),taper_sides[len(taper_sides) - wlen:]))
data *= win
elif data.ndim == 2:
npts = data.shape[1]
# window length
wlen = int(npts*fraction)
if wlen>maxlen:wlen = maxlen
# taper values
func = _get_function_from_entry_point('taper', 'hann')
if 2*wlen == npts:
taper_sides = func(2*wlen)
else:
taper_sides = func(2*wlen + 1)
# taper window
win = np.hstack((taper_sides[:wlen], np.ones(npts-2*wlen),taper_sides[len(taper_sides) - wlen:]))
for ii in range(data.shape[0]):
data[ii] *= win
return data
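# Illustrative usage sketch: demean, detrend, and taper compose into a
# standard pre-processing chain for windowed noise segments.
def _example_preprocess():
    d = np.cumsum(np.random.randn(4, 1000), axis=1)  # 4 drifting traces
    d = taper(detrend(demean(d)), fraction=0.05)
    print(np.max(np.abs(d.mean(axis=1))))  # near 0 after demean/detrend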
def whiten(data, fft_para):
'''
This function takes 1-dimensional timeseries array, transforms to frequency domain using fft,
whitens the amplitude of the spectrum in frequency domain between *freqmin* and *freqmax*
and returns the whitened fft.
PARAMETERS:
----------------------
data: numpy.ndarray contains the 1D time series to whiten
fft_para: dict containing all fft_cc parameters such as
dt: The sampling space of the `data`
freqmin: The lower frequency bound
freqmax: The upper frequency bound
smooth_N: integer, it defines the half window length to smooth
freq_norm: whitening method between 'one-bit' and 'RMA'
RETURNS:
----------------------
FFTRawSign: numpy.ndarray contains the FFT of the whitened input trace between the frequency bounds
'''
# load parameters
delta = fft_para['dt']
freqmin = fft_para['freqmin']
freqmax = fft_para['freqmax']
smooth_N = fft_para['smooth_N']
freq_norm = fft_para['freq_norm']
# Speed up FFT by padding to optimal size for FFTPACK
if data.ndim == 1:
axis = 0
elif data.ndim == 2:
axis = 1
Nfft = int(next_fast_len(int(data.shape[axis])))
Napod = 100
Nfft = int(Nfft)
freqVec = scipy.fftpack.fftfreq(Nfft, d=delta)[:Nfft // 2]
J = np.where((freqVec >= freqmin) & (freqVec <= freqmax))[0]
low = J[0] - Napod
if low <= 0:
low = 1
left = J[0]
right = J[-1]
high = J[-1] + Napod
if high > Nfft/2:
high = int(Nfft//2)
FFTRawSign = scipy.fftpack.fft(data, Nfft,axis=axis)
# Left tapering:
if axis == 1:
FFTRawSign[:,0:low] *= 0
FFTRawSign[:,low:left] = np.cos(
np.linspace(np.pi / 2., np.pi, left - low)) ** 2 * np.exp(
1j * np.angle(FFTRawSign[:,low:left]))
# Pass band:
if freq_norm == 'phase_only':
FFTRawSign[:,left:right] = np.exp(1j * np.angle(FFTRawSign[:,left:right]))
elif freq_norm == 'rma':
for ii in range(data.shape[0]):
tave = moving_ave(np.abs(FFTRawSign[ii,left:right]),smooth_N)
FFTRawSign[ii,left:right] = FFTRawSign[ii,left:right]/tave
# Right tapering:
FFTRawSign[:,right:high] = np.cos(
np.linspace(0., np.pi / 2., high - right)) ** 2 * np.exp(
1j * np.angle(FFTRawSign[:,right:high]))
FFTRawSign[:,high:Nfft//2] *= 0
# Hermitian symmetry (because the input is real)
FFTRawSign[:,-(Nfft//2)+1:] = np.flip(np.conj(FFTRawSign[:,1:(Nfft//2)]),axis=axis)
else:
FFTRawSign[0:low] *= 0
FFTRawSign[low:left] = np.cos(
np.linspace(np.pi / 2., np.pi, left - low)) ** 2 * np.exp(
1j * np.angle(FFTRawSign[low:left]))
# Pass band:
if freq_norm == 'phase_only':
FFTRawSign[left:right] = np.exp(1j * np.angle(FFTRawSign[left:right]))
elif freq_norm == 'rma':
tave = moving_ave(np.abs(FFTRawSign[left:right]),smooth_N)
FFTRawSign[left:right] = FFTRawSign[left:right]/tave
# Right tapering:
FFTRawSign[right:high] = np.cos(
np.linspace(0., np.pi / 2., high - right)) ** 2 * np.exp(
            1j * np.angle(FFTRawSign[right:high]))
        FFTRawSign[high:Nfft//2] *= 0
        # Hermitian symmetry (because the input is real); reconstructed to mirror the 2-D branch above
        FFTRawSign[-(Nfft//2)+1:] = np.flip(np.conj(FFTRawSign[1:(Nfft//2)]),axis=axis)
    return FFTRawSign
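# Illustrative usage sketch (assumes scipy and next_fast_len are imported in
# this module): whiten a noise trace between 0.1 and 1 Hz, then return to the
# time domain with an inverse FFT.
def _example_whiten():
    para = {'dt': 0.05, 'freqmin': 0.1, 'freqmax': 1.0,
            'smooth_N': 20, 'freq_norm': 'phase_only'}
    spec = whiten(np.random.randn(12000), para)
    trace_w = np.real(scipy.fftpack.ifft(spec, n=len(spec)))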
import packratt
import pytest
from pathlib import Path
pmp = pytest.mark.parametrize
@pmp('do_beam', (False, True))
@pmp('do_gains', (False, True))
def test_forwardmodel(do_beam, do_gains, tmp_path_factory):
test_dir = tmp_path_factory.mktemp("test_pfb")
packratt.get('/test/ms/2021-06-24/elwood/test_ascii_1h60.0s.MS.tar', str(test_dir))
import numpy as np
np.random.seed(420)
from numpy.testing import assert_allclose
from pyrap.tables import table
ms = table(str(test_dir / 'test_ascii_1h60.0s.MS'), readonly=False)
spw = table(str(test_dir / 'test_ascii_1h60.0s.MS::SPECTRAL_WINDOW'))
utime = np.unique(ms.getcol('TIME'))
freq = spw.getcol('CHAN_FREQ').squeeze()
freq0 = np.mean(freq)
ntime = utime.size
nchan = freq.size
nant = np.maximum(ms.getcol('ANTENNA1').max(), ms.getcol('ANTENNA2').max()) + 1
ncorr = ms.getcol('FLAG').shape[-1]
uvw = ms.getcol('UVW')
nrow = uvw.shape[0]
u_max = abs(uvw[:, 0]).max()
v_max = abs(uvw[:, 1]).max()
uv_max = np.maximum(u_max, v_max)
# image size
from africanus.constants import c as lightspeed
cell_N = 1.0 / (2 * uv_max * freq.max() / lightspeed)
srf = 2.0
cell_rad = cell_N / srf
cell_size = cell_rad * 180 / np.pi
print("Cell size set to %5.5e arcseconds" % cell_size)
fov = 2
npix = int(fov / cell_size)
if npix % 2:
npix += 1
nx = npix
ny = npix
print("Image size set to (%i, %i, %i)" % (nchan, nx, ny))
# model
model = np.zeros((nchan, nx, ny), dtype=np.float64)
nsource = 10
Ix = np.random.randint(0, npix, nsource)
Iy = np.random.randint(0, npix, nsource)
alpha = -0.7 + 0.1 * np.random.randn(nsource)
I0 = 1.0 + np.abs(np.random.randn(nsource))
for i in range(nsource):
model[:, Ix[i], Iy[i]] = I0[i] * (freq/freq0) ** alpha[i]
if do_beam:
# primary beam
from katbeam import JimBeam
beam = JimBeam('MKAT-AA-L-JIM-2020')
l_coord = -np.arange(-(nx//2), nx//2) * cell_size
m_coord = np.arange(-(ny//2), ny//2) * cell_size
        xx, yy = np.meshgrid(l_coord, m_coord, indexing='ij')
import numpy as np
import dist
import random
# computes the query's neighbor weights and returns the weighted prediction (cpr)
def predictor(query, neighbors, neighbors_ccr, neighbors_weight, method, param=None, inv_cov=None):
delta = neighbors - query
w = np.ones(len(neighbors))
for i in range(len(neighbors)):
if method == 'r': # reciprocal (1/d)
w[i] = dist.reciprocal(delta[i], inv_cov, neighbors_weight[i])
elif method == 'g': # gaussian (exp(-d**2))
w[i] = dist.gaussian(delta[i], param, inv_cov, neighbors_weight[i])
else: # 'ep' - epanechnikov ( 0.75 * (1-d**2/r**2) )
w[i] = dist.epanechnikov(delta[i], param, inv_cov, neighbors_weight[i])
    cpr = np.dot(w, neighbors_ccr)
    return cpr
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Scaler(object):
"""
Iterative estimation of row and column centering/scaling
using the algorithm from page 31 of:
Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares
"""
def __init__(
self,
center_columns=True,
scale_columns=True,
min_value=None,
max_value=None,
verbose=True):
self.center_columns = center_columns
self.scale_columns = scale_columns
self.min_value = min_value
self.max_value = max_value
self.verbose = verbose
self.column_centers = None
self.column_scales = None
def fit(self, X):
if self.center_columns:
self.column_centers = np.nanmean(X, axis=0)
if self.scale_columns:
self.column_scales = np.nanstd(X, axis=0)
self.column_scales[self.column_scales == 0] = 1.0
return self
def transform(self, X):
X = np.asarray(X).copy()
if self.center_columns:
X -= self.column_centers
if self.scale_columns:
X /= self.column_scales
return X
def fit_transform(self, X):
self.fit(X)
return self.transform(X)
def inverse_transform(self, X):
X = np.asarray(X).copy()
if self.scale_columns:
X *= self.column_scales
if self.center_columns:
X += self.column_centers
return X
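# Illustrative usage sketch: NaN-aware column standardization with an exact
# inverse on the observed entries.
def _example_scaler():
    X = np.array([[1.0, 10.0], [2.0, np.nan], [3.0, 30.0]])
    scaler = Scaler()
    X_norm = scaler.fit_transform(X)
    X_back = scaler.inverse_transform(X_norm)   # matches X where observed
    print(np.nanmax(np.abs(X_back - X)))        # ~0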
class BiScaler(object):
"""
Iterative estimation of row and column centering/scaling
using the algorithm from page 31 of:
Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares
"""
def __init__(
self,
center_rows=True,
center_columns=True,
scale_rows=True,
scale_columns=True,
min_value=None,
max_value=None,
max_iters=100,
tolerance=0.001,
verbose=True):
self.center_rows = center_rows
self.center_columns = center_columns
self.scale_rows = scale_rows
self.scale_columns = scale_columns
self.min_value = min_value
self.max_value = max_value
self.max_iters = max_iters
self.tolerance = tolerance
self.verbose = verbose
def estimate_row_means(
self,
X,
observed,
column_means,
column_scales):
"""
row_center[i] =
sum{j in observed[i, :]}{
(1 / column_scale[j]) * (X[i, j] - column_center[j])
}
------------------------------------------------------------
sum{j in observed[i, :]}{1 / column_scale[j]}
"""
n_rows, n_cols = X.shape
column_means = np.asarray(column_means)
if len(column_means) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_means.shape))
X = X - column_means.reshape((1, n_cols))
column_weights = 1.0 / column_scales
X *= column_weights.reshape((1, n_cols))
row_means = np.zeros(n_rows, dtype=X.dtype)
row_residual_sums = np.nansum(X, axis=1)
for i in range(n_rows):
row_mask = observed[i, :]
sum_weights = column_weights[row_mask].sum()
row_means[i] = row_residual_sums[i] / sum_weights
return row_means
def estimate_column_means(
self,
X,
observed,
row_means,
row_scales):
"""
column_center[j] =
sum{i in observed[:, j]}{
(1 / row_scale[i]) * (X[i, j]) - row_center[i])
}
------------------------------------------------------------
sum{i in observed[:, j]}{1 / row_scale[i]}
"""
n_rows, n_cols = X.shape
row_means = np.asarray(row_means)
if len(row_means) != n_rows:
raise ValueError("Expected length %d but got shape %s" % (
n_rows, row_means.shape))
column_means = np.zeros(n_cols, dtype=X.dtype)
X = X - row_means.reshape((n_rows, 1))
row_weights = 1.0 / row_scales
X *= row_weights.reshape((n_rows, 1))
col_residual_sums = np.nansum(X, axis=0)
for j in range(n_cols):
col_mask = observed[:, j]
sum_weights = row_weights[col_mask].sum()
column_means[j] = col_residual_sums[j] / sum_weights
return column_means
def center(self, X, row_means, column_means, inplace=False):
n_rows, n_cols = X.shape
row_means = np.asarray(row_means)
column_means = np.asarray(column_means)
if len(row_means) != n_rows:
raise ValueError("Expected length %d but got shape %s" % (
n_rows, row_means.shape))
if len(column_means) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_means.shape))
if not inplace:
X = X.copy()
X -= row_means.reshape((n_rows, 1))
X -= column_means.reshape((1, n_cols))
return X
def rescale(self, X, row_scales, column_scales, inplace=False):
if not inplace:
X = X.copy()
n_rows, n_cols = X.shape
X /= row_scales.reshape((n_rows, 1))
X /= column_scales.reshape((1, n_cols))
return X
def estimate_row_scales(
self,
X_centered,
column_scales):
"""
row_scale[i]**2 =
mean{j in observed[i, :]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
--------------------------------------------------
column_scale[j] ** 2
}
"""
n_rows, n_cols = X_centered.shape
column_scales = np.asarray(column_scales)
if len(column_scales) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_scales))
row_variances = np.nanmean(
X_centered ** 2 / (column_scales ** 2).reshape((1, n_cols)),
axis=1)
row_variances[row_variances == 0] = 1.0
assert len(row_variances) == n_rows, "%d != %d" % (
len(row_variances),
n_rows)
return np.sqrt(row_variances)
def estimate_column_scales(
self,
X_centered,
row_scales):
"""
column_scale[j] ** 2 =
mean{i in observed[:, j]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
-------------------------------------------------
row_scale[i] ** 2
}
"""
n_rows, n_cols = X_centered.shape
row_scales = np.asarray(row_scales)
if len(row_scales) != n_rows:
raise ValueError("Expected length %s, got shape %s" % (
n_rows, row_scales.shape,))
column_variances = np.nanmean(
X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)),
axis=0)
column_variances[column_variances == 0] = 1.0
assert len(column_variances) == n_cols, "%d != %d" % (
len(column_variances),
n_cols)
return np.sqrt(column_variances)
def residual(self, X_normalized):
total = 0
if self.center_rows:
row_means = np.nanmean(X_normalized, axis=1)
total += (row_means ** 2).sum()
if self.center_columns:
column_means = np.nanmean(X_normalized, axis=0)
total += (column_means ** 2).sum()
if self.scale_rows:
row_variances = np.nanvar(X_normalized, axis=1)
row_variances[row_variances == 0] = 1.0
total += (np.log(row_variances) ** 2).sum()
if self.scale_columns:
column_variances = np.nanvar(X_normalized, axis=0)
column_variances[column_variances == 0] = 1.0
total += (np.log(column_variances) ** 2).sum()
return total
def clamp(self, X, inplace=False):
if not inplace:
X = X.copy()
if self.min_value is not None:
X[X < self.min_value] = self.min_value
if self.max_value is not None:
X[X > self.max_value] = self.max_value
return X
def fit(self, X):
X = self.clamp(X)
n_rows, n_cols = X.shape
dtype = X.dtype
# To avoid inefficient memory access we keep around two copies
# of the array, one contiguous in the rows and the other
# contiguous in the columns
X_row_major = np.asarray(X, order="C")
X_column_major = np.asarray(X, order="F")
observed_row_major = ~np.isnan(X_row_major)
n_observed_per_row = observed_row_major.sum(axis=1)
n_empty_rows = (n_observed_per_row == 0).sum()
if n_empty_rows > 0:
raise ValueError("%d rows have no observed values" % n_empty_rows)
observed_column_major = np.asarray(observed_row_major, order="F")
n_observed_per_column = observed_column_major.sum(axis=0)
n_empty_columns = (n_observed_per_column == 0).sum()
if n_empty_columns > 0:
raise ValueError("%d columns have no observed values" % (
n_empty_columns,))
# initialize by assuming that rows are zero-mean/unit variance and
# with a direct estimate of mean and standard deviation
# of each column
row_means = np.zeros(n_rows, dtype=dtype)
row_scales = np.ones(n_rows, dtype=dtype)
if self.center_columns:
column_means = np.nanmean(X, axis=0)
else:
column_means = np.zeros(n_cols, dtype=dtype)
if self.scale_columns:
column_scales = np.nanstd(X, axis=0)
column_scales[column_scales == 0] = 1.0
else:
column_scales = np.ones(n_cols, dtype=dtype)
last_residual = self.residual(X)
if self.verbose:
print("[BiScaler] Initial log residual value = %f" % (
                np.log(last_residual)))
#!/usr/bin/python3
from __future__ import print_function
from __future__ import division
'''
This code implements an ELM-based controller
'''
# Basic
import numpy as np
from matplotlib import pyplot as plt
from scipy.linalg import pinv as pinv2  # pinv2 was removed from SciPy; pinv is the drop-in replacement
from tqdm import tqdm
import pandas as pd
import itertools
from argparse import ArgumentParser
import logging
import math
from sys import path as sys_path
from os import path as os_path, times
# For dataset
from collections import deque, namedtuple
import random
# For solvers
from qpsolvers import solve_qp
from scipy.integrate import solve_ivp
from casadi import *
import casadi
# For estimators
from torch import nn
import torch
import torch.nn.functional as F
# Project packages
from system import ACC
from controller import LCBF, PID
from dataset import ELMDataset, NNDataset
from estimator import *
from normalizer import *
from functions import *
# For carla
from carla_utils import *
# Parameters
dt = 0.05
simTime = 40
# Real parameters
v_lead = 15
v_des = 18
m = 1650.0
print("mass : ", m)
g = 9.81
f0 = 0*m
f1 = 0.182*m
f2 = -0.0004*m
c_a = 0.8
c_d = 0.8
Th = 1.8
# Nominal parameters
f0_nom = 2*f0
f1_nom = 2*f1
f2_nom = 2*f2
m_nom = 0.75* m
# QP-CLF-CBF parameters
p_slack = 1e-2
clf_rate = 5
cbf_rate = 5
torch.manual_seed(42)
def game_loop(args):
########################################
# System
########################################
acc = ACC(m, c_d, f0, f1, f2, v_lead)
derivator = Derivator(dt)
########################################
# Controller
########################################
cont = LCBF(m_nom, c_a, c_d, f0_nom, f1_nom, f2_nom, v_lead, v_des, Th, clf_rate, cbf_rate, p_slack)
########################################
# Estimator parameters
########################################
input_size = 3
hidden_size = 100
output_size = 1
learned_ratio = 1.1
time_th = learned_ratio* hidden_size
########################################
# PID control reference
########################################
x_dim = 3
u_dim = 1
kp = np.array([[0, 0.2, 0]])
kd = np.array([[0, 0, 0]])
    ki = np.array([[0, 0.2, 0]])
import os
import yaml
import numpy as np
import torch
def get_bin_vals(global_config):
"""
Creates bin values for grasping widths according to bounds defined in config
Arguments:
global_config {dict} -- config
Returns:
tf.constant -- bin value tensor
"""
bins_bounds = np.array(global_config['DATA']['labels']['offset_bins'])
if global_config['TEST']['bin_vals'] == 'max':
bin_vals = (bins_bounds[1:] + bins_bounds[:-1]) / 2
bin_vals[-1] = bins_bounds[-1]
elif global_config['TEST']['bin_vals'] == 'mean':
bin_vals = bins_bounds[1:]
else:
raise NotImplementedError
if not global_config['TEST']['allow_zero_margin']:
bin_vals = np.minimum(bin_vals, global_config['DATA']['gripper_width'] - global_config['TEST']['extra_opening'])
torch_bin_vals = torch.tensor(bin_vals, dtype=torch.float32)
return torch_bin_vals
def build_6d_grasp(approach_dirs, base_dirs, contact_pts, thickness, gripper_depth = 0.1034):
"""
Build 6-DoF grasps + width from point-wise network predictions
Arguments:
approach_dirs {np.ndarray/tf.tensor} -- Nx3 approach direction vectors
base_dirs {np.ndarray/tf.tensor} -- Nx3 base direction vectors
contact_pts {np.ndarray/tf.tensor} -- Nx3 contact points
thickness {np.ndarray/tf.tensor} -- Nx1 grasp width
Keyword Arguments:
use_tf {bool} -- whether inputs and outputs are tf tensors (default: {False})
gripper_depth {float} -- distance from gripper coordinate frame to gripper baseline in m (default: {0.1034})
Returns:
np.ndarray -- Nx4x4 grasp poses in camera coordinates
"""
test1 = torch.sum(base_dirs ** 2, 2)
test2 = torch.sum(approach_dirs ** 2, 2)
grasps_R = torch.stack([base_dirs, torch.cross(approach_dirs, base_dirs), approach_dirs], dim=3)
grasps_t = contact_pts + torch.unsqueeze(thickness, 2) / 2 * base_dirs - gripper_depth * approach_dirs
ones = torch.ones((contact_pts.shape[0], contact_pts.shape[1], 1, 1), dtype=torch.float32)
zeros = torch.zeros((contact_pts.shape[0], contact_pts.shape[1], 1, 3), dtype=torch.float32)
homog_vec = torch.cat((zeros, ones), dim=3)
grasps = torch.cat((torch.cat((grasps_R, torch.unsqueeze(grasps_t, 3)), dim=3), homog_vec), dim=2)
# else:
# grasps = []
# for i in range(len(contact_pts)):
# grasp = np.eye(4)
# grasp[:3,0] = base_dirs[i] / np.linalg.norm(base_dirs[i])
# grasp[:3,2] = approach_dirs[i] / np.linalg.norm(approach_dirs[i])
# grasp_y = np.cross( grasp[:3,2],grasp[:3,0])
# grasp[:3,1] = grasp_y / np.linalg.norm(grasp_y)
# # base_gripper xyz = contact + thickness / 2 * baseline_dir - gripper_d * approach_dir
# grasp[:3,3] = contact_pts[i] + thickness[i] / 2 * grasp[:3,0] - gripper_depth * grasp[:3,2]
# # grasp[0,3] = finger_width
# grasps.append(grasp)
# grasps = np.array(grasps)
return grasps
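# Illustrative usage sketch: assemble one 4x4 grasp pose from orthogonal unit
# direction vectors (batch and point dimensions of size 1).
def _example_build_6d_grasp():
    approach = torch.tensor([[[0.0, 0.0, 1.0]]])
    base = torch.tensor([[[1.0, 0.0, 0.0]]])
    contact = torch.tensor([[[0.0, 0.0, 0.5]]])
    width = torch.tensor([[0.04]])
    grasps = build_6d_grasp(approach, base, contact, width)
    print(grasps.shape)  # torch.Size([1, 1, 4, 4])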
def recursive_key_value_assign(d, ks, v):
"""
Recursive value assignment to a nested dict
Arguments:
d {dict} -- dict
ks {list} -- list of hierarchical keys
v {value} -- value to assign
"""
if len(ks) > 1:
recursive_key_value_assign(d[ks[0]], ks[1:], v)
elif len(ks) == 1:
d[ks[0]] = v
def load_config(checkpoint_dir, batch_size=None, max_epoch=None, data_path=None, arg_configs=[], save=False):
"""
Loads yaml config file and overwrites parameters with function arguments and --arg_config parameters
Arguments:
checkpoint_dir {str} -- Checkpoint directory where config file was copied to
Keyword Arguments:
batch_size {int} -- [description] (default: {None})
max_epoch {int} -- "epochs" (number of scenes) to train (default: {None})
data_path {str} -- path to scenes with contact grasp data (default: {None})
arg_configs {list} -- Overwrite config parameters by hierarchical command line arguments (default: {[]})
save {bool} -- Save overwritten config file (default: {False})
Returns:
[dict] -- Config
"""
config_path = os.path.join(checkpoint_dir, 'config.yaml')
config_path = config_path if os.path.exists(config_path) else os.path.join(os.path.dirname(__file__), 'config.yaml')
with open(config_path, 'r') as f:
global_config = yaml.load(f, Loader=yaml.Loader)
for conf in arg_configs:
k_str, v = conf.split(':')
try:
v = eval(v)
except:
pass
ks = [int(k) if k.isdigit() else k for k in k_str.split('.')]
recursive_key_value_assign(global_config, ks, v)
if batch_size is not None:
global_config['OPTIMIZER']['batch_size'] = int(batch_size)
if max_epoch is not None:
global_config['OPTIMIZER']['max_epoch'] = int(max_epoch)
if data_path is not None:
global_config['DATA']['data_path'] = data_path
global_config['DATA']['classes'] = None
if save:
with open(os.path.join(checkpoint_dir, 'config.yaml'), 'w') as f:
yaml.dump(global_config, f)
return global_config
def distance_by_translation_point(p1, p2):
"""
Gets two nx3 points and computes the distance between point p1 and p2.
"""
return np.sqrt(np.sum(np.square(p1 - p2), axis=-1))
def farthest_points(data, nclusters, dist_func, return_center_indexes=False, return_distances=False, verbose=False):
"""
Performs farthest point sampling on data points.
Args:
data: numpy array of the data points.
nclusters: int, number of clusters.
dist_dunc: distance function that is used to compare two data points.
return_center_indexes: bool, If True, returns the indexes of the center of
clusters.
return_distances: bool, If True, return distances of each point from centers.
Returns clusters, [centers, distances]:
clusters: numpy array containing the cluster index for each element in
data.
centers: numpy array containing the integer index of each center.
distances: numpy array of [npoints] that contains the closest distance of
each point to any of the cluster centers.
"""
if nclusters >= data.shape[0]:
if return_center_indexes:
return np.arange(data.shape[0], dtype=np.int32), np.arange(data.shape[0], dtype=np.int32)
return np.arange(data.shape[0], dtype=np.int32)
clusters = np.ones((data.shape[0],), dtype=np.int32) * -1
distances = np.ones((data.shape[0],), dtype=np.float32) * 1e7
centers = []
for iter in range(nclusters):
index = np.argmax(distances)
centers.append(index)
shape = list(data.shape)
for i in range(1, len(shape)):
shape[i] = 1
broadcasted_data = np.tile(np.expand_dims(data[index], 0), shape)
new_distances = dist_func(broadcasted_data, data)
        distances = np.minimum(distances, new_distances)
        clusters[distances == new_distances] = iter
    # Closing lines reconstructed (assumption) to match the documented return signature.
    if return_center_indexes:
        if return_distances:
            return clusters, np.asarray(centers, dtype=np.int32), distances
        return clusters, np.asarray(centers, dtype=np.int32)
    return clusters
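# Illustrative usage sketch: pick 3 well-spread representatives from a cloud
# of 3-D points.
def _example_farthest_points():
    pts = np.random.randn(200, 3)
    clusters, centers = farthest_points(
        pts, 3, distance_by_translation_point, return_center_indexes=True)
    print(centers)  # indices of 3 mutually distant points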
import torch
import torch.nn as nn
import numpy as np
from mmcv.cnn import ConvModule
from PIL import Image
from ..builder import HEADS
from .decode_head import BaseDecodeHead
import torch.nn.functional as F
from scipy.ndimage import distance_transform_edt
###seg to edge
def label_to_onehot(label, num_classes):
"""
Converts a segmentation mask (H,W) to (K,H,W) where the last dim is a one
hot encoding vector
"""
_label = [label == (i + 1) for i in range(num_classes)]
return np.array(_label).astype(np.uint8)
def onehot_to_label(label):
"""
Converts a mask (K,H,W) to (H,W)
"""
_label = np.argmax(label, axis=0)
_label[_label != 0] += 1
return _label
def onehot_to_multiclass_edges(label, radius, num_classes):
"""
Converts a segmentation mask (K,H,W) to an edgemap (K,H,W)
"""
if radius < 0:
return label
# We need to pad the borders for boundary conditions
label_pad = np.pad(label, ((0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0)
channels = []
for i in range(num_classes):
dist = distance_transform_edt(label_pad[i, :])+distance_transform_edt(1.0-label_pad[i, :])
dist = dist[1:-1, 1:-1]
dist[dist > radius] = 0
dist = (dist > 0).astype(np.uint8)
channels.append(dist)
    return np.array(channels)
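# Illustrative usage sketch: derive per-class boundary maps from a tiny
# 2-class segmentation mask with a 1-pixel radius.
def _example_seg_to_edge():
    label = np.zeros((8, 8), dtype=np.uint8)
    label[2:6, 2:6] = 1  # class 1 occupies a square
    onehot = label_to_onehot(label, num_classes=2)
    edges = onehot_to_multiclass_edges(onehot, radius=1, num_classes=2)
    print(edges.shape)   # (2, 8, 8); edges[0] marks the square's border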
import math
import time
from math import floor, sqrt
import SimpleITK as sitk
import numpy as np
def modified_LC(maskImg, comp=1):
# initialize the connected component filter
ccFilter = sitk.ConnectedComponentImageFilter()
# apply the filter to the input image
labelImg = ccFilter.Execute(maskImg)
# get the number of labels (connected components)
numberOfLabels = ccFilter.GetObjectCount()
# extract the data array from the itk object
labelArray = sitk.GetArrayFromImage(labelImg)
# count the voxels belong to different components
labelSizes = np.bincount(labelArray.flatten())
labelSizes2 = np.asarray(sorted(labelSizes, reverse=True))
# get the largest connected component
# largestLabel = np.argmax(labelSizes[4:]) + 1
if(comp>=labelSizes2.shape[0]):
return None
else:
largestLabel = np.argwhere(labelSizes == labelSizes2[comp])[0][0]
# convert the data array to itk object
outImg = sitk.GetImageFromArray((labelArray == largestLabel).astype(np.int16))
# output image should have same metadata as input mask image
outImg.CopyInformation(maskImg)
return outImg
def idx_to_coor(n, k):
i = floor((-sqrt((2*n+1)*(2*n+1)-8*k)+2*n+1)/2)
j = k + i - i*(2*n-i+1)//2
return i, j
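# Illustrative usage sketch: with n single components, flat indices beyond n
# enumerate the pairs (i, j) used by modified_LC_SV below. For n = 3:
def _example_idx_to_coor():
    n = 3
    for idx in range(n + 1, n * (n + 1) // 2 + 1):
        i, j = idx_to_coor(n, idx - 1)
        print(idx, "->", (i, j + 1))  # 4 -> (1, 2), 5 -> (1, 3), 6 -> (2, 3)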
def modified_LC_SV(maskImg, numComps, idx):
if (idx <= numComps):
predicted_image_lc = modified_LC(maskImg, idx)
else:
i, j = idx_to_coor(numComps, idx-1)
j += 1
predicted_image_lc_i = modified_LC(maskImg, i)
predicted_image_lc_j = modified_LC(maskImg, j)
predicted_image_lc = predicted_image_lc_i + predicted_image_lc_j
predicted_image_lc = sitk.Cast(predicted_image_lc, sitk.sitkUInt8)
return predicted_image_lc
def resampler_sitk_Seg(image_sitk, predicted_image, spacing=[1.0, 1.0, 1.0], default_pixel_value=0,
interpolator=sitk.sitkNearestNeighbor, dimension=3, rnd=5):
ratio = [round(spacing_dim / spacing[i], 6) for i, spacing_dim in enumerate(image_sitk.GetSpacing())]
ImRef = sitk.Image(tuple(math.ceil(size_dim * ratio[i]) for i, size_dim in enumerate(image_sitk.GetSize())),
sitk.sitkInt16)
#ImRef.SetOrigin(image_sitk.GetOrigin())
ImRef.SetOrigin(predicted_image.GetOrigin())
ImRef.SetDirection(image_sitk.GetDirection())
ImRef.SetSpacing(spacing)
identity = sitk.Transform(dimension, sitk.sitkIdentity)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(ImRef)
resampler.SetInterpolator(interpolator)
resampler.SetDefaultPixelValue(default_pixel_value)
resampler.SetTransform(identity)
resampled_sitk = resampler.Execute(image_sitk)
return resampled_sitk
def resampler_sitk_Reg(image_sitk, spacing=[1.0, 1.0, 1.0], default_pixel_value=0,
interpolator=sitk.sitkNearestNeighbor, dimension=3, rnd=3):
ratio = [spacing_dim / spacing[i] for i, spacing_dim in enumerate(image_sitk.GetSpacing())]
ImRef = sitk.Image(tuple(math.ceil(size_dim * ratio[i]) for i, size_dim in enumerate(image_sitk.GetSize())),
sitk.sitkInt16)
ImRef.SetOrigin(image_sitk.GetOrigin())
ImRef.SetDirection(image_sitk.GetDirection())
ImRef.SetSpacing(spacing)
identity = sitk.Transform(dimension, sitk.sitkIdentity)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(ImRef)
resampler.SetInterpolator(interpolator)
resampler.SetDefaultPixelValue(default_pixel_value)
resampler.SetTransform(identity)
resampled_sitk = resampler.Execute(image_sitk)
return resampled_sitk
def DSC_MSD_HD95_Seg(groundtruth_image_itk, predicted_image, num_of_components,resample_flag=True, resample_spacing=[1.0, 1.0, 1.0]):
if resample_flag:
groundtruth_image_itk = resampler_sitk_Seg(image_sitk=groundtruth_image_itk, predicted_image=predicted_image,
spacing=resample_spacing,
default_pixel_value=0,
interpolator=sitk.sitkNearestNeighbor, dimension=3, rnd=3)
groundtruth_image_itk = sitk.Cast(groundtruth_image_itk, sitk.sitkUInt8)
# predicted_image = sitk.Cast(predicted_image, sitk.sitkUInt8)
size_diff = np.sum(np.subtract(groundtruth_image_itk.GetSize(), predicted_image.GetSize()))
if size_diff > 0:
if size_diff == 1:
groundtruth_image_itk = groundtruth_image_itk[:, :, :-1]
        elif size_diff == 2:
groundtruth_image_itk = groundtruth_image_itk[:-1, :-1, :]
elif size_diff == 3:
groundtruth_image_itk = groundtruth_image_itk[:-1, :-1, :-1]
else:
print(size_diff)
elif size_diff < 0:
if size_diff == -2:
predicted_image = predicted_image[:-1, :-1, :]
elif size_diff == -3:
predicted_image = predicted_image[:-1, :-1, :-1]
else:
print(size_diff)
else:
pass
label_overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
hausdorff_distance_image_filter = sitk.HausdorffDistanceImageFilter()
dsc_test = []
startTime = 0
isSV = (num_of_components > 10) # TODO: this should be a function parameter instead.
for i in range(1, 50): # MAX NUM COMPONENTS TO CONSIDER
if i == num_of_components:
startTime = time.time()
predicted_image_lc = modified_LC(predicted_image, i)
if predicted_image_lc is None:
break
predicted_image_lc = sitk.Cast(predicted_image_lc, sitk.sitkUInt8)
try:
label_overlap_measures_filter.Execute(groundtruth_image_itk, predicted_image_lc)
except:
pass
val = label_overlap_measures_filter.GetDiceCoefficient()
dsc_test.append(val)
if val > 0.5 and not isSV:
break
numComps = len(dsc_test)
#TODO: loop ranges could be until numComps, that would be faster than num_of_components
#TODO: I guess you could ignore a component if it has DSC 0.0, but that'd not be worth the effort to code.
if isSV and numComps > 1:
if startTime == 0:
startTime = time.time()
for i in range(1, 50): # MAX NUM COMPONENTS TO CONSIDER FOR COMBINED SV
predicted_image_lc_i = modified_LC(predicted_image, i)
if predicted_image_lc_i is None:
break
# print(" SV " + repr(i) + ", dsc " + repr(dsc_test[i-1]))
for j in range(i + 1, 50): # MAX NUM COMPONENTS TO CONSIDER FOR COMBINED SV
predicted_image_lc_j = modified_LC(predicted_image, j)
if predicted_image_lc_j is None:
break
predicted_image_lc = predicted_image_lc_i + predicted_image_lc_j
predicted_image_lc = sitk.Cast(predicted_image_lc, sitk.sitkUInt8)
try:
label_overlap_measures_filter.Execute(groundtruth_image_itk, predicted_image_lc)
except:
pass
val = label_overlap_measures_filter.GetDiceCoefficient()
dsc_test.append(val)
assert(len(dsc_test) == (numComps+1)*numComps/2)
if startTime != 0:
endTime = time.time()
# print("Extra time: " + repr(endTime - startTime) + ".")
else:
# print("No time added.")
pass
if predicted_image_lc is None and i == 1: #Needed to prevent crash if the network did not predict an organ at all
print("No predicted pixels for this organ...!")
predicted_image[0, 0, 0] = 1
predicted_image = sitk.Cast(predicted_image, sitk.sitkUInt8)
dsc_test = []
else:
LC = np.argmax(np.asarray(dsc_test)) + 1
dsc_test = []
predicted_image = modified_LC_SV(predicted_image, numComps, LC)
label_overlap_measures_filter.Execute(groundtruth_image_itk, predicted_image)
dice = label_overlap_measures_filter.GetDiceCoefficient()
# print(" dsc = " + repr(dice))
jaccard = label_overlap_measures_filter.GetJaccardCoefficient()
vol_similarity = label_overlap_measures_filter.GetVolumeSimilarity()
hausdorff_distance_image_filter.Execute(groundtruth_image_itk, predicted_image)
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(groundtruth_image_itk, squaredDistance=False, useImageSpacing=True))
reference_surface = sitk.LabelContour(groundtruth_image_itk)
statistics_image_filter = sitk.StatisticsImageFilter()
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(reference_surface)
num_reference_surface_pixels = int(statistics_image_filter.GetSum())
segmented_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(predicted_image, squaredDistance=False, useImageSpacing=True))
segmented_surface = sitk.LabelContour(predicted_image)
# Multiply the binary surface segmentations with the distance maps. The resulting distance
# maps contain non-zero values only on the surface (they can also contain zero on the surface)
seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32)
ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32)
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(segmented_surface)
num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
# Get all non-zero distances and then add zero distances if required.
seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
seg2ref_distances = seg2ref_distances + list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))
ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
ref2seg_distances = ref2seg_distances + list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))
all_surface_distances = seg2ref_distances + ref2seg_distances
msd = np.mean(all_surface_distances)
hd_percentile = np.maximum(np.percentile(seg2ref_distances, 95), np.percentile(ref2seg_distances, 95))
return dice, msd, hd_percentile, jaccard, vol_similarity
def DSC_MSD_HD95_Reg(groundtruth_image_itk, predicted_image, resample_flag=True, resample_spacing=[1.0, 1.0, 1.0]):
if resample_flag:
groundtruth_image_itk = resampler_sitk_Reg(image_sitk=groundtruth_image_itk, spacing=resample_spacing,
default_pixel_value=0,
interpolator=sitk.sitkNearestNeighbor, dimension=3, rnd=3)
groundtruth_image_itk = sitk.Cast(groundtruth_image_itk, sitk.sitkUInt8)
predicted_image = sitk.Cast(predicted_image, sitk.sitkUInt8)
size_diff = np.sum(np.subtract(groundtruth_image_itk.GetSize(), predicted_image.GetSize()))
if size_diff > 0:
if size_diff == 2:
groundtruth_image_itk = groundtruth_image_itk[:-1, :-1, :]
        elif size_diff == 3:
groundtruth_image_itk = groundtruth_image_itk[:-1, :-1, :-1]
elif size_diff == 1:
groundtruth_image_itk = groundtruth_image_itk[:, :, :-1]
else:
print(size_diff)
elif size_diff < 0:
if size_diff == -2:
predicted_image = predicted_image[:-1, :-1, :]
elif size_diff == -3:
predicted_image = predicted_image[:-1, :-1, :-1]
elif size_diff == -1:
predicted_image = predicted_image[:, :, :-1]
else:
print(size_diff)
else:
pass
try:
label_overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
label_overlap_measures_filter.Execute(groundtruth_image_itk, predicted_image)
dice = label_overlap_measures_filter.GetDiceCoefficient()
jaccard = label_overlap_measures_filter.GetJaccardCoefficient()
vol_similarity = label_overlap_measures_filter.GetVolumeSimilarity()
hausdorff_distance_image_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_image_filter.Execute(groundtruth_image_itk, predicted_image)
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(groundtruth_image_itk, squaredDistance=False, useImageSpacing=True))
reference_surface = sitk.LabelContour(groundtruth_image_itk)
statistics_image_filter = sitk.StatisticsImageFilter()
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(reference_surface)
num_reference_surface_pixels = int(statistics_image_filter.GetSum())
segmented_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(predicted_image, squaredDistance=False, useImageSpacing=True))
segmented_surface = sitk.LabelContour(predicted_image)
# Multiply the binary surface segmentations with the distance maps. The resulting distance
# maps contain non-zero values only on the surface (they can also contain zero on the surface)
seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32)
ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32)
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(segmented_surface)
num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
# Get all non-zero distances and then add zero distances if required.
seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
seg2ref_distances = seg2ref_distances + list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))
ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
ref2seg_distances = ref2seg_distances + list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))
all_surface_distances = seg2ref_distances + ref2seg_distances
msd = np.mean(all_surface_distances)
        hd_percentile = np.maximum(np.percentile(seg2ref_distances, 95), np.percentile(ref2seg_distances, 95))
        return dice, msd, hd_percentile, jaccard, vol_similarity
    except Exception:
        # Reconstructed (assumption): return the same metric tuple with Nones when the filters fail.
        return None, None, None, None, None
import logging
import random
import numpy as np
import pandas as pd
from numpy.lib.stride_tricks import sliding_window_view
from scipy.ndimage import uniform_filter1d
import basty.utils.misc as misc
class BodyPose:
def __init__(self, pose_cfg={}):
self.pose_cfg = pose_cfg.copy()
self.logger = logging.getLogger("main")
self.counterparts = self.pose_cfg.get("counterparts", {})
self.singles = self.pose_cfg.get("singles", [])
self.connected_parts = self.pose_cfg.get("connected_parts", [])
self.groups = self.pose_cfg.get("groups", {})
self.defined_points = self.pose_cfg.get("defined_points", {})
self.centerline = self.pose_cfg.get("centerline", [])
if len(self.centerline) != 2 and len(self.centerline) != 0:
raise ValueError(
"A centerline must be defined by two body-parts"
+ f"Given value {self.centerline} is not suitable for definition."
)
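        # Example pose_cfg (body-part names here are hypothetical):
        #   {"centerline": ["thorax", "head"],
        #    "counterparts": {"leg_left": "leg_right"},
        #    "singles": ["head", "thorax"]}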
@staticmethod
def get_sub_df_pose(df_pose, ind):
if ind not in ["x", "y"]:
raise ValueError(f"Given sub-dataframe indicator {ind} is not defined.")
sub_df = df_pose[df_pose.columns[df_pose.columns.str.endswith(ind)]]
sub_df.columns = [name[:-2] for name in sub_df.columns]
return sub_df
@staticmethod
def get_sub_df_coord(df_coord, ind):
if ind not in ["x", "y", "likelihood"]:
raise ValueError(f"Given sub-dataframe indicator {ind} is not defined.")
try:
sub_df = df_coord[df_coord.columns[df_coord.loc[1] == ind]]
sub_df.columns = sub_df.iloc[0]
sub_df = sub_df.drop([0, 1]).fillna(0).reset_index(drop=True)
assert sub_df.shape[1] > 0
except AssertionError:
sub_df = df_coord[df_coord.columns[df_coord.loc[2] == ind]]
sub_df.columns = sub_df.iloc[1]
sub_df = sub_df.drop([0, 1, 2]).fillna(0).reset_index(drop=True)
return sub_df
def split_sub_df_coord(self, df_coord):
llh = self.get_sub_df_coord(df_coord, "likelihood").astype(float)
x = self.get_sub_df_coord(df_coord, "x").astype(float)
y = self.get_sub_df_coord(df_coord, "y").astype(float)
return llh, x, y
def make_frames_egocentric(self, df_pose, per_frame=False):
spine = self.centerline
eps = np.finfo(np.float32).eps
if len(spine) == 0:
raise ValueError(
"A centerline must be given to construct egencentric frames."
)
dfPose_x = self.get_sub_df_pose(df_pose, "x")
dfPose_y = self.get_sub_df_pose(df_pose, "y")
dfPose_new_dict = {}
if per_frame:
raise NotImplementedError(
"Constructing egocentric frames per frame is not implemented yet."
)
else:
s1_x = dfPose_x[spine[0]].to_numpy()
s1_y = dfPose_y[spine[0]].to_numpy()
s2_x = dfPose_x[spine[1]].to_numpy()
s2_y = dfPose_y[spine[1]].to_numpy()
r2_shifted = np.stack((s2_x - s1_x, s2_y - s1_y), axis=1)
r2_x = np.linalg.norm(r2_shifted, axis=1)
r2_y = np.zeros(s2_y.shape[0])
r2 = np.stack((r2_x, r2_y), axis=1)
def get_rotation_matrix(v1, v2):
r1, r2 = (v1 / np.linalg.norm(v1)).reshape(2), (
v2 / np.linalg.norm(v2)
).reshape(2)
rc = np.cross(r1, r2)
rd = np.dot(r1, r2)
rc_norm = np.linalg.norm(rc)
kmat = np.array([[0, -rc], [rc, 0]])
rotation_matrix = (
np.eye(2)
+ kmat
+ kmat.dot(kmat) * ((1 - rd) / (rc_norm + eps) ** 2)
)
return rotation_matrix
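            # For 2D inputs, rc = cross(r1, r2) equals sin(angle) and rd equals
            # cos(angle), so the Rodrigues-style expression above reduces to the
            # plane rotation [[cos, -sin], [sin, cos]] aligning v1's direction with v2's.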
egocentric_transform = np.ndarray((r2.shape[0], r2.shape[1], r2.shape[1]))
for i in range(egocentric_transform.shape[0]):
egocentric_transform[i] = get_rotation_matrix(
r2_shifted[i, :], r2[i, :]
)
# rs_pinv = np.linalg.pinv(r2_shifted[:,:,np.newaxis])
# egocentric_transform = np.matmul(r2[:, :, np.newaxis], rs_pinv)
for name in dfPose_x.columns:
r = np.stack(
(
dfPose_x[name].to_numpy() - s1_x,
dfPose_y[name].to_numpy() - s1_y,
),
axis=1,
)
r_new = np.matmul(egocentric_transform, r[:, :, np.newaxis])
dfPose_new_dict[name + "_x"] = r_new[:, 0, 0]
dfPose_new_dict[name + "_y"] = r_new[:, 1, 0]
# It's very likely for df_pose to contain NaNs at this point.
# Interpolating NaN's might be a good idea.
dfEgocentric = pd.DataFrame.from_dict(dfPose_new_dict)
return dfEgocentric
def _finalize_orientation(self, orientations, left_llh, right_llh, winsize=3):
left_llh = uniform_filter1d(left_llh, size=winsize)
right_llh = uniform_filter1d(right_llh, size=winsize)
left_idx = np.asarray(left_llh > right_llh).nonzero()[0]
right_idx = np.asarray(right_llh > left_llh).nonzero()[0]
orientations["left"].update(left_idx)
orientations["right"].update(right_idx)
orientations["idx"].difference_update(left_idx)
orientations["idx"].difference_update(right_idx)
for idx in orientations["idx"]:
            lc = np.min(np.abs(np.array(list(orientations["left"])) - idx))
            rc = np.min(np.abs(np.array(list(orientations["right"])) - idx))
if lc < rc:
orientations["left"].add(idx)
elif rc < lc:
orientations["right"].add(idx)
else:
rand_orient = random.choice(["right", "left"])
orientations[rand_orient].add(idx)
orientations["idx"].difference_update(orientations["idx"])
return orientations
def _window_count_orientation(self, orientations, left_llh, right_llh, winsize):
likely_left = left_llh > right_llh
for idx, window in enumerate(sliding_window_view(likely_left, 2 * winsize + 1)):
if idx in orientations["idx"]:
                left_count = np.count_nonzero(window)
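                # sliding_window_view(a, w) yields len(a) - w + 1 overlapping windows
                # of length w; counting the True entries gives a local left-vote tally.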
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import sys
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
r = 4
#loading data
#X = np.load('../Data/data0.npy')
#y = np.load('../Data/lab0.npy')
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
#print("split done")
## Padding to make it square
#tp = ((0, 0), (64, 64), (0, 0))
#X_train = np.pad(X_train, pad_width=tp, mode='constant', constant_values=0)
#X_train = torch.Tensor([[i] for i in X_train])
#X_test = np.pad(X_test, pad_width=tp, mode='constant', constant_values=0)
#X_test = torch.Tensor([[i] for i in X_test])
batch_size = 500
print("Converted to tensor")
class DigitAdditionDataset(Dataset):
def __init__(self, X, y):
self.x = X
self.n_samples = X.shape[0]
self.y = torch.Tensor(y).long()
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.n_samples
#traindataset = DigitAdditionDataset(X_train, y_train)
#valdataset = DigitAdditionDataset(X_test, y_test)
#print("dataloader made")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layerR1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
nn.ReLU())
self.layerR2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
nn.ReLU()
)
self.layerR3 = nn.Sequential(
nn.Conv2d(32, 48, kernel_size=5, stride=1, padding=2),
nn.ReLU()
)
        # (48, 40, 168) -> (64, 40, 84)
self.layerR4 = nn.Sequential(
nn.Conv2d(48, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)))
        # (64, 40, 84) -> (64, 40, 42)
self.layer1 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)))
        # (64, 40, 42) -> (128, 22, 22) (after layer2 + layer3)
self.layer2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
nn.ReLU())
self.layer3 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=5, stride=1, padding=(4,3)),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
# (128, 22, 22) -> (192, 11, 11)
self.layer4 = nn.Sequential(
nn.Conv2d(128, 192, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
# (192, 11, 11) -> (192, 12, 12)
self.layer5 = nn.Sequential(
nn.Conv2d(192, 192, kernel_size=4, stride=1, padding=2),
nn.ReLU())
# (192, 12, 12) -> (128, 6, 6)
self.layer6 = nn.Sequential(
nn.Conv2d(192, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc1 = nn.Linear(128*6*6, 128*6*6)
self.drop1 = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(128*6*6, 2000)
self.drop2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(2000, 37)
#self.res1 = nn.Linear(2000, 10)
#self.res2 = nn.Linear(2000, 10)
#self.res3 = nn.Linear(2000, 10)
#self.res4 = nn.Linear(2000, 10)
## (10, 1, 4) -> (50, 1, 1)
#self.lconv = nn.Conv2d(10, 50, kernel_size=(4,1),stride=1,padding=0)
def forward(self, x):
out = self.layerR1(x)
out = self.layerR2(out)
out = self.layerR3(out)
out = self.layerR4(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = out.reshape(out.size(0), -1)
#print(out.shape)
out = F.relu(self.fc1(out))
#print(out.shape)
out = self.drop1(out)
out = F.relu(self.fc2(out))
out = self.drop2(out)
out = self.fc3(out)
return out
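# Hedged shape check (assumption: 1-channel 40x168 input strips, which is what the
# per-layer shape comments above and the 128*6*6 fc1 input imply). Call manually;
# illustration only.
def _check_net_shapes():
    net = Net()
    dummy = torch.zeros(2, 1, 40, 168)
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([2, 37])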
# In[153]:
model = Net()
model= nn.DataParallel(model)
model = model.cuda()
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print("model made")
# In[154]:
# Train the model
def train_model(model, trainloader, valoader, num_epochs, criterion, optimizer, saveweights=True, eval_pass=False, weightsfile="./trained_model"):
print("starting train")
torch.cuda.empty_cache()
if eval_pass:
num_epochs = 1
total_step = len(trainloader)
train_loss_list = []
train_acc_list = []
val_acc_list = []
val_loss_list = []
for epoch in range(num_epochs):
if not eval_pass:
for i, (images, label) in enumerate(trainloader):
model.train()
# Run the forward pass
images = images.cuda()
label = label.cuda()
outputs = model(images)
#print("OUTPUT DEVICE", outputs.device, label.device)
loss = criterion(outputs, label)
#train_loss_list.append(loss.item())
# Backprop and perform Adam optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Track the accuracy
total = label.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == label).sum().item()
del label
del images
#train_acc_list.append(correct / total)
print('Training: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
(correct / total) * 100))
train_acc_list.append(correct / total)
train_loss_list.append(loss.item())
torch.cuda.empty_cache()
for images, label in valoader:
model.eval()
# Run the forward pass
images = images.cuda()
label = label.cuda()
outputs = model(images)
#print("OUTPUT DEVICE", outputs.device, label.device)
loss = criterion(outputs, label)
# Track the accuracy
total = label.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == label).sum().item()
val_acc_list.append(correct / total)
val_loss_list.append(loss.item())
if epoch % 10 == 0:
torch.save(model.state_dict(), 'output/model' + str(r) + '_' + str(epoch))
print('Validation: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
(correct / total) * 100))
if saveweights:
torch.save(model.state_dict(), './trained_model')
plt.title("Curve:Loss")
plt.plot(range(len(train_loss_list)), train_loss_list, label="Train")
plt.plot(range(len(train_loss_list)), val_loss_list, label="Validation")
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig('output/' + str(r) + 'loss_curve.png')
plt.close()
plt.title("Curve:Accuracy")
plt.plot(range(len(train_loss_list)), train_acc_list, label="Train")
plt.plot(range(len(train_loss_list)), val_acc_list, label="Validation")
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig('output/' + str(r) + 'acc_curve.png')
return model
def test_model(model, testloader, criterion):
total_correct = 0
total_loss = 0
n = 0
for images, label in testloader:
model.eval()
# Run the forward pass
images = images.cuda()
label = label.cuda()
outputs = model(images)
loss = criterion(outputs, label)
# Track the accuracy
total = label.size(0)
n += total
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == label).sum().item()
total_correct += correct
total_loss += loss.item()
accuracy = total_correct / n * 100
loss = total_loss / len(testloader)
print("Test: Accuracy:", accuracy, "Loss:", loss)
if __name__ == '__main__':
# init model
print(sys.argv[1])
if sys.argv[1] == 'train':
# Loading Data
if len(sys.argv) < 4:
X = np.load('../Data/data0.npy')
y = np.load('../Data/lab0.npy')
            Xt = np.load('../Data/data1.npy')
import numpy as np
from numpy import linalg as la
class cylinder:  # Class describing a cylinder
    def __init__(self, o, a, b, c):
        # o - center of the base; a, b - axes of the ellipse; c - central axis of the cylinder
self.o = o
self.a = a
self.b = b
self.c = c
    def check(self):  # Check that the cylinder is defined correctly
if np.all(np.matmul(np.transpose(self.a), self.b) == 0):
            # a and b must be orthogonal
matr = np.hstack((self.a, self.b, self.c))
            # a, b and c must form a linearly independent system
if la.det(matr) != 0:
return True
return False
def get_translation(self):
        # Returns the translation vector (for the change of basis)
return -self.o
def get_matrix(self):
        # Returns the change-of-basis matrix to the (a, b, c) basis
return la.inv(np.hstack((self.a, self.b, self.c)))
class parallelepiped:  # Class describing a parallelepiped
def __init__(self, o, a, b, c):
self.o = o
self.a = a
self.b = b
self.c = c
def check(self):
        # Check that the parallelepiped is defined correctly
matr = np.hstack((self.a, self.b, self.c))
        # a, b and c must form a linearly independent system
if la.det(matr) != 0:
return True
return False
    def transform(self, matr, delta):  # Transform coordinates
        self.o = np.matmul(matr, self.o + delta)  # delta - translation vector
        self.a = np.matmul(matr, self.a)  # matr - change-of-basis matrix
self.b = np.matmul(matr, self.b)
self.c = np.matmul(matr, self.c)
    def _get_X_4(self):  # Returns the X_4 matrix (described in the accompanying text)
        matr = np.hstack((self.o, self.a, self.b, self.c))
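# Hedged usage sketch (3x1 column vectors, as both classes above assume; the
# numbers are illustrative): a unit cylinder along z in the standard basis.
def _demo_cylinder():
    o = np.array([[0.0], [0.0], [0.0]])
    a = np.array([[1.0], [0.0], [0.0]])
    b = np.array([[0.0], [1.0], [0.0]])
    c = np.array([[0.0], [0.0], [1.0]])
    cyl = cylinder(o, a, b, c)
    print(cyl.check())       # True: a is orthogonal to b and det(a|b|c) != 0
    print(cyl.get_matrix())  # identity, since (a, b, c) is already the standard basis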
from scipy.signal import argrelextrema, savgol_filter
from scipy.interpolate import interp1d
from astropy.io import fits
from scipy import integrate
from iminuit import Minuit
import matplotlib.pyplot as plt
import numpy as np
import os
import astropy
from spectractor import parameters
from spectractor.config import set_logger, load_config, apply_rebinning_to_parameters
from spectractor.extractor.dispersers import Hologram
from spectractor.extractor.targets import load_target
from spectractor.tools import (ensure_dir, load_fits, plot_image_simple,
find_nearest, plot_spectrum_simple, fit_poly1d_legendre, gauss,
rescale_x_for_legendre, fit_multigauss_and_bgd, multigauss_and_bgd)
from spectractor.extractor.psf import load_PSF
from spectractor.extractor.chromaticpsf import ChromaticPSF
from spectractor.simulation.adr import adr_calib, flip_and_rotate_adr_to_image_xy_coordinates
from spectractor.simulation.throughput import TelescopeTransmission
fits_mappings = {'date_obs': 'DATE-OBS',
'expo': 'EXPTIME',
'airmass': 'AIRMASS',
'disperser_label': 'GRATING',
'units': 'UNIT2',
'rotation_angle': 'ROTANGLE',
'dec': 'DEC',
'hour_angle': 'HA',
'temperature': 'OUTTEMP',
'pressure': 'OUTPRESS',
'humidity': 'OUTHUM',
'lambda_ref': 'LBDA_REF',
'parallactic_angle': 'PARANGLE',
}
class Spectrum:
""" Class used to store information and methods relative to spectra and their extraction.
Attributes
----------
my_logger: logging
Logging object
fast_load: bool
If True, only load the spectrum but not the spectrogram.
units: str
Units of the spectrum.
lambdas: array
Spectrum wavelengths in nm.
data: array
Spectrum amplitude array in self.units units.
err: array
Spectrum amplitude uncertainties in self.units units.
cov_matrix: array
Spectrum amplitude covariance matrix between wavelengths in self.units units.
lambdas_binwidths: array
Bin widths of the wavelength array in nm.
lambdas_order2: array
Spectrum wavelengths for order 2 contamination in nm.
data_order2: array
Spectrum amplitude array for order 2 contamination in self.units units.
err_order2: array
Spectrum amplitude uncertainties for order 2 contamination in self.units units.
lambda_ref: float
Reference wavelength for ADR computations in nm.
order: int
Index of the diffraction order.
x0: array
Target position [x,y] in the image in pixels.
psf: PSF
PSF instance to model the spectrum PSF.
chromatic_psf: ChromaticPSF
ChromaticPSF object that contains data on the PSF shape and evolution in wavelength.
date_obs: str
Date of the observation.
airmass: float
Airmass of the current target.
expo: float
Exposure time in seconds.
disperser_label: str
Label of the disperser.
filter_label: str:
Label of the filter.
rotation_angle: float
Dispersion axis angle in the image in degrees, positive if anticlockwise.
parallactic_angle: float
Parallactic angle in degrees.
lines: Lines
Lines instance that contains data on the emission or absorption lines to be searched and fitted in the spectrum.
header: Fits.Header
FITS file header.
disperser: Disperser
Disperser instance that describes the disperser.
target: Target
Target instance that describes the current exposure.
dec: float
Declination coordinate of the current exposure.
    hour_angle: float
Hour angle coordinate of the current exposure.
temperature: float
Outside temperature in Celsius degrees.
pressure: float
Outside pressure in hPa.
humidity: float
Outside relative humidity in fraction of one.
spectrogram: array
Spectrogram 2D image in image units.
spectrogram_bgd: array
Estimated 2D background fitted below the spectrogram in image units.
spectrogram_bgd_rms: array
Estimated 2D background RMS fitted below the spectrogram in image units.
spectrogram_err: array
Estimated 2D background uncertainty fitted below the spectrogram in image units.
spectrogram_fit: array
Best fitting model of the spectrogram in image units.
spectrogram_residuals: array
Residuals between the spectrogram data and the best fitting model of the spectrogram in image units.
spectrogram_x0: float
Relative position of the target in the spectrogram array along the x axis.
spectrogram_y0: float
Relative position of the target in the spectrogram array along the y axis.
spectrogram_xmin: int
Left index of the spectrogram crop in the image.
spectrogram_xmax: int
Right index of the spectrogram crop in the image.
spectrogram_ymin: int
Bottom index of the spectrogram crop in the image.
spectrogram_ymax: int
Top index of the spectrogram crop in the image.
spectrogram_deg: int
Degree of the polynomial functions to model wavelength evolutions of the PSF parameters.
spectrogram_saturation: float
Level of saturation in the spectrogram in image units.
spectrogram_Nx: int
Size of the spectrogram along the x axis.
spectrogram_Ny: int
Size of the spectrogram along the y axis.
"""
def __init__(self, file_name="", image=None, order=1, target=None, config="", fast_load=False,
spectrogram_file_name_override=None,
psf_file_name_override=None,):
""" Class used to store information and methods relative to spectra and their extraction.
Parameters
----------
file_name: str, optional
Path to the spectrum file (default: "").
image: Image, optional
Image object from which to create the Spectrum object:
copy the information from the Image header (default: None).
order: int
Order of the spectrum (default: 1)
target: Target, optional
Target object if provided (default: None)
config: str, optional
A config file name to load some parameter values for a given instrument (default: "").
fast_load: bool, optional
If True, only the spectrum is loaded (not the PSF nor the spectrogram data) (default: False).
Examples
--------
Load a spectrum from a fits file
>>> s = Spectrum(file_name='tests/data/reduc_20170605_028_spectrum.fits')
>>> print(s.order)
1
>>> print(s.target.label)
PNG321.0+3.9
>>> print(s.disperser_label)
HoloPhAg
Load a spectrum from a fits image file
>>> from spectractor.extractor.images import Image
>>> image = Image('tests/data/reduc_20170605_028.fits', target_label='PNG321.0+3.9')
>>> s = Spectrum(image=image)
>>> print(s.target.label)
PNG321.0+3.9
"""
self.fast_load = fast_load
self.my_logger = set_logger(self.__class__.__name__)
if config != "":
load_config(config)
self.target = target
self.data = None
self.err = None
self.cov_matrix = None
self.x0 = None
self.pixels = None
self.lambdas = None
self.lambdas_binwidths = None
self.lambdas_indices = None
self.lambda_ref = None
self.order = order
self.chromatic_psf = None
self.filter_label = ""
self.filters = None
self.units = 'ADU/s'
self.gain = parameters.CCD_GAIN
self.psf = load_PSF(psf_type="Moffat", target=self.target)
self.chromatic_psf = ChromaticPSF(self.psf, Nx=1, Ny=1, deg=1, saturation=1)
self.rotation_angle = 0
self.parallactic_angle = None
self.spectrogram = None
self.spectrogram_bgd = None
self.spectrogram_bgd_rms = None
self.spectrogram_err = None
self.spectrogram_residuals = None
self.spectrogram_fit = None
self.spectrogram_x0 = None
self.spectrogram_y0 = None
self.spectrogram_xmin = None
self.spectrogram_xmax = None
self.spectrogram_ymin = None
self.spectrogram_ymax = None
self.spectrogram_deg = None
self.spectrogram_saturation = None
self.spectrogram_Nx = None
self.spectrogram_Ny = None
self.lambdas_order2 = None
self.data_order2 = None
self.err_order2 = None
self.dec = None
self.hour_angle = None
self.temperature = None
self.pressure = None
self.humidity = None
self.parallactic_angle = None
self.filename = file_name
if file_name != "":
self.load_spectrum(file_name,
spectrogram_file_name_override=spectrogram_file_name_override,
psf_file_name_override=psf_file_name_override)
if image is not None:
self.header = image.header
self.date_obs = image.date_obs
self.airmass = image.airmass
self.expo = image.expo
self.filters = image.filters
self.filter_label = image.filter_label
self.disperser_label = image.disperser_label
self.disperser = image.disperser
self.target = image.target
self.lines = self.target.lines
self.x0 = image.target_pixcoords
self.target_pixcoords = image.target_pixcoords
self.target_pixcoords_rotated = image.target_pixcoords_rotated
self.units = image.units
self.gain = image.gain
self.rotation_angle = image.rotation_angle
self.my_logger.info('\n\tSpectrum info copied from image')
self.dec = image.dec
self.hour_angle = image.hour_angle
self.temperature = image.temperature
self.pressure = image.pressure
self.humidity = image.humidity
self.parallactic_angle = image.parallactic_angle
self.adr_params = [self.dec, self.hour_angle, self.temperature, self.pressure,
self.humidity, self.airmass]
self.load_filter()
def convert_from_ADUrate_to_flam(self):
"""Convert units from ADU/s to erg/s/cm^2/nm.
The SED is supposed to be in flam units ie erg/s/cm^2/nm
Examples
--------
>>> s = Spectrum(file_name='tests/data/reduc_20170605_028_spectrum.fits')
>>> s.convert_from_ADUrate_to_flam()
.. doctest::
:hide:
>>> assert np.max(s.data) < 1e-2
>>> assert np.max(s.err) < 1e-2
"""
if self.units == 'erg/s/cm$^2$/nm' or self.units == "flam":
self.my_logger.warning(f"You ask to convert spectrum already in {self.units}"
f" in erg/s/cm^2/nm... check your code ! Skip the instruction.")
return
ldl = parameters.FLAM_TO_ADURATE * self.lambdas * np.abs(self.lambdas_binwidths)
self.data /= ldl
if self.err is not None:
self.err /= ldl
if self.cov_matrix is not None:
ldl_mat = np.outer(ldl, ldl)
self.cov_matrix /= ldl_mat
if self.data_order2 is not None:
ldl_2 = parameters.FLAM_TO_ADURATE * self.lambdas_order2 * np.abs(np.gradient(self.lambdas_order2))
self.data_order2 /= ldl_2
self.err_order2 /= ldl_2
self.units = 'erg/s/cm$^2$/nm'
def convert_from_flam_to_ADUrate(self):
"""Convert units from erg/s/cm^2/nm to ADU/s.
The SED is supposed to be in flam units ie erg/s/cm^2/nm
Examples
--------
>>> s = Spectrum(file_name='tests/data/reduc_20170605_028_spectrum.fits')
>>> s.convert_from_flam_to_ADUrate()
.. doctest::
:hide:
>>> assert np.max(s.data) > 1e-2
>>> assert np.max(s.err) > 1e-2
"""
if self.units == "ADU/s":
self.my_logger.warning(f"You ask to convert spectrum already in {self.units} in ADU/s... check your code ! "
f"Skip the instruction")
return
ldl = parameters.FLAM_TO_ADURATE * self.lambdas * np.abs(self.lambdas_binwidths)
self.data *= ldl
if self.err is not None:
self.err *= ldl
if self.cov_matrix is not None:
ldl_mat = np.outer(ldl, ldl)
self.cov_matrix *= ldl_mat
if self.data_order2 is not None:
ldl_2 = parameters.FLAM_TO_ADURATE * self.lambdas_order2 * np.abs(np.gradient(self.lambdas_order2))
self.data_order2 *= ldl_2
self.err_order2 *= ldl_2
self.units = 'ADU/s'
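    # Round-trip note: convert_from_flam_to_ADUrate multiplies data, err and the
    # covariance by ldl = FLAM_TO_ADURATE * lambdas * |dlambda|, while
    # convert_from_ADUrate_to_flam divides by the same array, so applying one after
    # the other restores the spectrum up to floating-point error.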
def load_filter(self):
"""Load filter properties and set relevant LAMBDA_MIN and LAMBDA_MAX values.
Examples
--------
>>> s = Spectrum()
>>> s.filter_label = 'FGB37'
>>> s.load_filter()
.. doctest::
:hide:
>>> assert np.isclose(parameters.LAMBDA_MIN, 300)
>>> assert np.isclose(parameters.LAMBDA_MAX, 760)
"""
if self.filter_label != "" and "empty" not in self.filter_label.lower():
t = TelescopeTransmission(filter_label=self.filter_label)
t.reset_lambda_range(transmission_threshold=1e-4)
def plot_spectrum(self, ax=None, xlim=None, live_fit=False, label='', force_lines=False):
"""Plot spectrum with emission and absorption lines.
Parameters
----------
ax: Axes, optional
Axes instance (default: None).
label: str
Label for the legend (default: '').
xlim: list, optional
List of minimum and maximum abscisses (default: None)
live_fit: bool, optional
If True the spectrum is plotted in live during the fitting procedures
(default: False).
force_lines: bool
Force the over plot of vertical lines for atomic lines if set to True (default: False).
Examples
--------
>>> s = Spectrum(file_name='tests/data/reduc_20170530_134_spectrum.fits')
>>> s.plot_spectrum(xlim=[500,900], live_fit=False, force_lines=True)
"""
if ax is None:
plt.figure(figsize=[12, 6])
ax = plt.gca()
if label == '':
label = f'Order {self.order:d} spectrum\n' \
r'$D_{\mathrm{CCD}}=' \
rf'{self.disperser.D:.2f}\,$mm'
if self.x0 is not None:
label += rf', $x_0={self.x0[0]:.2f}\,$pix'
title = self.target.label
if self.lambdas_order2 is not None:
distance = self.disperser.grating_lambda_to_pixel(self.lambdas_order2, self.x0, order=2)
lambdas_order2_contamination = self.disperser.grating_pixel_to_lambda(distance, self.x0, order=1)
data_order2_contamination = self.data_order2 * (self.lambdas_order2 * np.gradient(self.lambdas_order2)) \
/ (lambdas_order2_contamination * np.gradient(lambdas_order2_contamination))
if np.sum(data_order2_contamination) / np.sum(self.data) > 0.01:
data_interp = interp1d(self.lambdas, self.data, kind="linear", fill_value="0", bounds_error=False)
plot_spectrum_simple(ax, lambdas_order2_contamination,
data_interp(lambdas_order2_contamination) + data_order2_contamination,
data_err=None, xlim=xlim, label='Order 2 contamination', linestyle="--", lw=1)
plot_spectrum_simple(ax, self.lambdas, self.data, data_err=self.err, xlim=xlim, label=label,
title=title, units=self.units)
if len(self.target.spectra) > 0:
for k in range(len(self.target.spectra)):
plot_indices = np.logical_and(self.target.wavelengths[k] > np.min(self.lambdas),
self.target.wavelengths[k] < np.max(self.lambdas))
s = self.target.spectra[k] / np.max(self.target.spectra[k][plot_indices]) * np.max(self.data)
ax.plot(self.target.wavelengths[k], s, lw=2, label='Tabulated spectra #%d' % k)
if self.lambdas is not None:
self.lines.plot_detected_lines(ax, print_table=parameters.VERBOSE)
if self.lambdas is not None and self.lines is not None:
self.lines.plot_atomic_lines(ax, fontsize=12, force=force_lines)
ax.legend(loc='best')
if self.filters is not None:
ax.get_legend().set_title(self.filters)
plt.gcf().tight_layout()
if parameters.LSST_SAVEFIGPATH: # pragma: no cover
plt.gcf().savefig(os.path.join(parameters.LSST_SAVEFIGPATH, f'{self.target.label}_spectrum.pdf'))
if parameters.DISPLAY:
if live_fit:
plt.draw()
plt.pause(1e-8)
plt.close()
else:
plt.show()
def plot_spectrogram(self, ax=None, scale="lin", title="", units="Image units", plot_stats=False,
target_pixcoords=None, vmin=None, vmax=None, figsize=[9.3, 8], aspect=None,
cmap=None, cax=None):
"""Plot spectrogram.
Parameters
----------
ax: Axes, optional
Axes instance (default: None).
scale: str
Scaling of the image (choose between: lin, log or log10) (default: lin)
title: str
Title of the image (default: "")
units: str
Units of the image to be written in the color bar label (default: "Image units")
cmap: colormap
Color map label (default: None)
target_pixcoords: array_like, optional
2D array giving the (x,y) coordinates of the targets on the image: add a scatter plot (default: None)
vmin: float
Minimum value of the image (default: None)
vmax: float
Maximum value of the image (default: None)
aspect: str
Aspect keyword to be passed to imshow (default: None)
cax: Axes, optional
Color bar axes if necessary (default: None).
figsize: tuple
Figure size (default: [9.3, 8]).
plot_stats: bool
If True, plot the uncertainty map instead of the spectrogram (default: False).
Examples
--------
>>> s = Spectrum(file_name='tests/data/reduc_20170605_028_spectrum.fits')
>>> s.plot_spectrogram()
>>> if parameters.DISPLAY: plt.show()
.. plot::
from spectractor.extractor.spectrum import Spectrum
s = Spectrum(file_name='tests/data/reduc_20170605_028_spectrum.fits')
s.plot_spectrogram()
"""
if ax is None:
plt.figure(figsize=figsize)
ax = plt.gca()
data = np.copy(self.spectrogram)
if plot_stats:
data = np.copy(self.spectrogram_err)
plot_image_simple(ax, data=data, scale=scale, title=title, units=units, cax=cax,
target_pixcoords=target_pixcoords, aspect=aspect, vmin=vmin, vmax=vmax, cmap=cmap)
if parameters.DISPLAY:
plt.show()
if parameters.PdfPages:
parameters.PdfPages.savefig()
def save_spectrum(self, output_file_name, overwrite=False):
"""Save the spectrum into a fits file (data, error and wavelengths).
Parameters
----------
output_file_name: str
Path of the output fits file.
overwrite: bool
If True overwrite the output file if needed (default: False).
Examples
--------
>>> import os
>>> s = Spectrum(file_name='tests/data/reduc_20170530_134_spectrum.fits')
>>> s.save_spectrum('./tests/test.fits')
.. doctest::
:hide:
>>> assert os.path.isfile('./tests/test.fits')
Overwrite previous file:
>>> s.save_spectrum('./tests/test.fits', overwrite=True)
.. doctest::
:hide:
>>> assert os.path.isfile('./tests/test.fits')
>>> os.remove('./tests/test.fits')
"""
self.header["REBIN"] = parameters.CCD_REBIN
self.header.comments['REBIN'] = 'original image rebinning factor to get spectrum.'
self.header['UNIT1'] = "nanometer"
self.header['UNIT2'] = self.units
self.header['COMMENTS'] = 'First column gives the wavelength in unit UNIT1, ' \
'second column gives the spectrum in unit UNIT2, ' \
'third column the corresponding errors.'
hdu1 = fits.PrimaryHDU()
hdu1.header = self.header
for attribute, header_key in fits_mappings.items():
try:
value = getattr(self, attribute)
except AttributeError:
print(f"Failed to get {attribute}")
continue
if isinstance(value, astropy.coordinates.angles.Angle):
value = value.degree
hdu1.header[header_key] = value
print(f"Set header key {header_key} to {value} from attr {attribute}")
hdu1.header["EXTNAME"] = "SPECTRUM"
hdu2 = fits.ImageHDU()
hdu2.header["EXTNAME"] = "SPEC_COV"
hdu3 = fits.ImageHDU()
hdu3.header["EXTNAME"] = "ORDER2"
hdu4 = fits.ImageHDU()
hdu4.header["EXTNAME"] = "ORDER0"
hdu1.data = [self.lambdas, self.data, self.err]
hdu2.data = self.cov_matrix
hdu3.data = [self.lambdas_order2, self.data_order2, self.err_order2]
hdu4.data = self.target.image
hdu4.header["IM_X0"] = self.target.image_x0
hdu4.header["IM_Y0"] = self.target.image_y0
hdu = fits.HDUList([hdu1, hdu2, hdu3, hdu4])
output_directory = '/'.join(output_file_name.split('/')[:-1])
ensure_dir(output_directory)
hdu.writeto(output_file_name, overwrite=overwrite)
self.my_logger.info(f'\n\tSpectrum saved in {output_file_name}')
def save_spectrogram(self, output_file_name, overwrite=False):
"""Save the spectrogram into a fits file (data, error and background).
Parameters
----------
output_file_name: str
Path of the output fits file.
overwrite: bool, optional
If True overwrite the output file if needed (default: False).
Examples
--------
"""
self.header['UNIT1'] = self.units
self.header['COMMENTS'] = 'First HDU gives the data in UNIT1 units, ' \
'second HDU gives the uncertainties, ' \
'third HDU the fitted background.'
self.header['S_X0'] = self.spectrogram_x0
self.header['S_Y0'] = self.spectrogram_y0
self.header['S_XMIN'] = self.spectrogram_xmin
self.header['S_XMAX'] = self.spectrogram_xmax
self.header['S_YMIN'] = self.spectrogram_ymin
self.header['S_YMAX'] = self.spectrogram_ymax
self.header['S_DEG'] = self.spectrogram_deg
self.header['S_SAT'] = self.spectrogram_saturation
hdu1 = fits.PrimaryHDU()
hdu1.header["EXTNAME"] = "S_DATA"
hdu2 = fits.ImageHDU()
hdu2.header["EXTNAME"] = "S_ERR"
hdu3 = fits.ImageHDU()
hdu3.header["EXTNAME"] = "S_BGD"
hdu4 = fits.ImageHDU()
hdu4.header["EXTNAME"] = "S_BGD_ER"
hdu5 = fits.ImageHDU()
hdu5.header["EXTNAME"] = "S_FIT"
hdu6 = fits.ImageHDU()
hdu6.header["EXTNAME"] = "S_RES"
hdu1.header = self.header
hdu1.data = self.spectrogram
hdu2.data = self.spectrogram_err
hdu3.data = self.spectrogram_bgd
hdu4.data = self.spectrogram_bgd_rms
hdu5.data = self.spectrogram_fit
hdu6.data = self.spectrogram_residuals
hdu = fits.HDUList([hdu1, hdu2, hdu3, hdu4, hdu5, hdu6])
output_directory = '/'.join(output_file_name.split('/')[:-1])
ensure_dir(output_directory)
hdu.writeto(output_file_name, overwrite=overwrite)
self.my_logger.info('\n\tSpectrogram saved in %s' % output_file_name)
def load_spectrum(self, input_file_name, spectrogram_file_name_override=None,
psf_file_name_override=None):
"""Load the spectrum from a fits file (data, error and wavelengths).
Parameters
----------
input_file_name: str
Path to the input fits file
spectrogram_file_name_override : str
Manually specify a path to the spectrogram file.
psf_file_name_override : str
Manually specify a path to the psf file.
Examples
--------
>>> s = Spectrum()
>>> s.load_spectrum('tests/data/reduc_20170605_028_spectrum.fits')
>>> print(s.units)
erg/s/cm$^2$/nm
"""
if os.path.isfile(input_file_name):
self.header, raw_data = load_fits(input_file_name)
self.lambdas = raw_data[0]
self.lambdas_binwidths = np.gradient(self.lambdas)
self.data = raw_data[1]
if len(raw_data) > 2:
self.err = raw_data[2]
# set the simple items from the mappings. More complex items, i.e.
# those needing function calls, follow
for attribute, header_key in fits_mappings.items():
if (item := self.header.get(header_key)) is not None:
setattr(self, attribute, item)
print(f'set {attribute} to {item}')
else:
print(f'Failed to set spectrum attribute {attribute} using header {header_key}')
# set the more complex items by hand here
if target := self.header.get('TARGET'):
self.target = load_target(target, verbose=parameters.VERBOSE)
self.lines = self.target.lines
if (targetx := self.header.get('TARGETX')) and (targety := self.header.get('TARGETY')):
self.x0 = [targetx, targety] # should be a tuple not a list
if rebin := self.header.get('CCDREBIN'):
if parameters.CCD_REBIN != rebin:
raise ValueError("Different values of rebinning parameters between config file and header. Choose.")
parameters.CCD_REBIN = rebin
if dist := self.header.get('D2CCD'):
parameters.DISTANCE2CCD = float(dist)
self.my_logger.info('\n\tLoading disperser %s...' % self.disperser_label)
self.disperser = Hologram(self.disperser_label, D=parameters.DISTANCE2CCD,
data_dir=parameters.DISPERSER_DIR, verbose=parameters.VERBOSE)
self.my_logger.info('\n\tSpectrum loaded from %s' % input_file_name)
if parameters.OBS_OBJECT_TYPE == "STAR":
self.adr_params = [self.dec, self.hour_angle, self.temperature,
self.pressure, self.humidity, self.airmass]
hdu_list = fits.open(input_file_name)
if len(hdu_list) > 1:
self.cov_matrix = hdu_list["SPEC_COV"].data
if len(hdu_list) > 2:
self.lambdas_order2, self.data_order2, self.err_order2 = hdu_list["ORDER2"].data
if len(hdu_list) > 3:
self.target.image = hdu_list["ORDER0"].data
self.target.image_x0 = float(hdu_list["ORDER0"].header["IM_X0"])
self.target.image_y0 = float(hdu_list["ORDER0"].header["IM_Y0"])
else:
self.cov_matrix = np.diag(self.err ** 2)
# original, hard-coded spectrogram/table relative paths
spectrogram_file_name = input_file_name.replace('spectrum', 'spectrogram')
psf_file_name = input_file_name.replace('spectrum.fits', 'table.csv')
# for LSST-DM supplied filenames
if spectrogram_file_name_override and psf_file_name_override:
self.fast_load = False
spectrogram_file_name = spectrogram_file_name_override
psf_file_name = psf_file_name_override
self.my_logger.info(f'Applying spectrogram filename override {spectrogram_file_name}')
self.my_logger.info(f'Applying psf filename override {psf_file_name}')
if not self.fast_load:
self.my_logger.info(f'\n\tLoading spectrogram from {spectrogram_file_name}...')
if os.path.isfile(spectrogram_file_name):
self.load_spectrogram(spectrogram_file_name)
else:
raise FileNotFoundError(f"Spectrogram file {spectrogram_file_name} does not exist.")
self.my_logger.info(f'\n\tLoading PSF from {psf_file_name}...')
if os.path.isfile(psf_file_name):
self.load_chromatic_psf(psf_file_name)
else:
raise FileNotFoundError(f"PSF file {psf_file_name} does not exist.")
else:
raise FileNotFoundError(f'\n\tSpectrum file {input_file_name} not found')
def load_spectrogram(self, input_file_name):
"""Load the spectrum from a fits file (data, error and wavelengths).
Parameters
----------
input_file_name: str
Path to the input fits file
Examples
--------
>>> s = Spectrum()
>>> s.load_spectrum('tests/data/reduc_20170605_028_spectrum.fits')
"""
if os.path.isfile(input_file_name):
hdu_list = fits.open(input_file_name)
header = hdu_list[0].header
self.spectrogram = hdu_list[0].data
self.spectrogram_err = hdu_list[1].data
self.spectrogram_bgd = hdu_list[2].data
if len(hdu_list) > 3:
self.spectrogram_bgd_rms = hdu_list[3].data
self.spectrogram_fit = hdu_list[4].data
self.spectrogram_residuals = hdu_list[5].data
self.spectrogram_x0 = float(header['S_X0'])
self.spectrogram_y0 = float(header['S_Y0'])
self.spectrogram_xmin = int(header['S_XMIN'])
self.spectrogram_xmax = int(header['S_XMAX'])
self.spectrogram_ymin = int(header['S_YMIN'])
self.spectrogram_ymax = int(header['S_YMAX'])
self.spectrogram_deg = int(header['S_DEG'])
self.spectrogram_saturation = float(header['S_SAT'])
self.spectrogram_Nx = self.spectrogram_xmax - self.spectrogram_xmin
self.spectrogram_Ny = self.spectrogram_ymax - self.spectrogram_ymin
            hdu_list.close()  # need to free the file descriptor allocation
self.my_logger.info('\n\tSpectrogram loaded from %s' % input_file_name)
else:
self.my_logger.warning('\n\tSpectrogram file %s not found' % input_file_name)
def load_chromatic_psf(self, input_file_name):
"""Load the spectrum from a fits file (data, error and wavelengths).
Parameters
----------
input_file_name: str
Path to the input fits file
Examples
--------
>>> s = Spectrum()
>>> s.load_spectrum('./tests/data/reduc_20170530_134_spectrum.fits')
>>> print(s.chromatic_psf.table) #doctest: +ELLIPSIS
lambdas Dx ...
"""
if os.path.isfile(input_file_name):
self.psf = load_PSF(psf_type=parameters.PSF_TYPE, target=self.target)
self.chromatic_psf = ChromaticPSF(self.psf, self.spectrogram_Nx, self.spectrogram_Ny,
x0=self.spectrogram_x0, y0=self.spectrogram_y0,
deg=self.spectrogram_deg, saturation=self.spectrogram_saturation,
file_name=input_file_name)
if 'PSF_REG' in self.header and float(self.header["PSF_REG"]) > 0:
self.chromatic_psf.opt_reg = float(self.header["PSF_REG"])
self.my_logger.info(f'\n\tSpectrogram loaded from {input_file_name}')
else:
self.my_logger.warning(f'\n\tSpectrogram file {input_file_name} not found')
def detect_lines(lines, lambdas, spec, spec_err=None, cov_matrix=None, fwhm_func=None, snr_minlevel=3, ax=None,
calibration_lines_only=False,
xlim=(parameters.LAMBDA_MIN, parameters.LAMBDA_MAX)):
"""Detect and fit the lines in a spectrum. The method is to look at maxima or minima
around emission or absorption tabulated lines, and to select surrounding pixels
to fit a (positive or negative) gaussian and a polynomial background. If several regions
overlap, a multi-gaussian fit is performed above a common polynomial background.
The mean global shift (in nm) between the detected and tabulated lines is returned, considering
only the lines with a signal-to-noise ratio above a threshold.
The order of the polynomial background is set in parameters.py with CALIB_BGD_ORDER.
Parameters
----------
lines: Lines
The Lines object containing the line characteristics
lambdas: float array
The wavelength array (in nm)
spec: float array
The spectrum amplitude array
spec_err: float array, optional
The spectrum amplitude uncertainty array (default: None)
cov_matrix: float array, optional
The spectrum amplitude 2D covariance matrix array (default: None)
fwhm_func: callable, optional
The fwhm of the cross spectrum to reset CALIB_PEAK_WIDTH parameter as a function of lambda (default: None)
snr_minlevel: float
The minimum signal over noise ratio to consider using a fitted line in the computation of the mean
shift output and to print it in the outpur table (default: 3)
ax: Axes, optional
An Axes instance to over plot the result of the fit (default: None).
calibration_lines_only: bool, optional
If True, try to detect only the lines with use_for_calibration attributes set True.
xlim: array, optional
(min, max) list limiting the wavelength interval where to detect spectral lines (default:
(parameters.LAMBDA_MIN, parameters.LAMBDA_MAX))
Returns
-------
shift: float
The mean shift (in nm) between the detected and tabulated lines
Examples
--------
Creation of a mock spectrum with emission and absorption lines:
>>> import numpy as np
>>> from spectractor.extractor.spectroscopy import Lines, HALPHA, HBETA, O2_1
>>> lambdas = np.arange(300,1000,1)
>>> spectrum = 1e4*np.exp(-((lambdas-600)/200)**2)
>>> spectrum += HALPHA.gaussian_model(lambdas, A=5000, sigma=3)
>>> spectrum += HBETA.gaussian_model(lambdas, A=3000, sigma=2)
>>> spectrum += O2_1.gaussian_model(lambdas, A=-3000, sigma=7)
>>> spectrum_err = np.sqrt(spectrum)
>>> cov = np.diag(spectrum_err)
>>> spectrum = np.random.poisson(spectrum)
>>> spec = Spectrum()
>>> spec.lambdas = lambdas
>>> spec.data = spectrum
>>> spec.err = spectrum_err
>>> fwhm_func = interp1d(lambdas, 0.01 * lambdas)
Detect the lines:
>>> lines = Lines([HALPHA, HBETA, O2_1], hydrogen_only=True,
... atmospheric_lines=True, redshift=0, emission_spectrum=True)
>>> global_chisq = detect_lines(lines, lambdas, spectrum, spectrum_err, cov, fwhm_func=fwhm_func)
.. doctest::
:hide:
>>> assert(global_chisq < 2)
Plot the result:
>>> import matplotlib.pyplot as plt
>>> spec.lines = lines
>>> fig = plt.figure()
>>> plot_spectrum_simple(plt.gca(), lambdas, spec.data, data_err=spec.err)
>>> lines.plot_detected_lines(plt.gca())
>>> if parameters.DISPLAY: plt.show()
"""
# main settings
peak_width = parameters.CALIB_PEAK_WIDTH
bgd_width = parameters.CALIB_BGD_WIDTH
# if lines.hydrogen_only:
# peak_width = 7
# bgd_width = 15
fwhm_to_peak_width_factor = 1.5
len_index_to_bgd_npar_factor = 0 * 0.12 / 0.024 * parameters.CCD_PIXEL2MM
baseline_prior = 3 # *sigma gaussian prior on base line fit
# filter the noise
# plt.errorbar(lambdas,spec,yerr=spec_err)
spec = np.copy(spec)
spec_smooth = savgol_filter(spec, parameters.CALIB_SAVGOL_WINDOW, parameters.CALIB_SAVGOL_ORDER)
# plt.plot(lambdas,spec)
# plt.show()
# initialisation
lambda_shifts = []
snrs = []
index_list = []
bgd_npar_list = []
peak_index_list = []
guess_list = []
bounds_list = []
lines_list = []
for line in lines.lines:
# reset line fit attributes
line.fitted = False
line.fit_popt = None
line.high_snr = False
if not line.use_for_calibration and calibration_lines_only:
continue
# wavelength of the line: find the nearest pixel index
line_wavelength = line.wavelength
if fwhm_func is not None:
peak_width = max(fwhm_to_peak_width_factor * fwhm_func(line_wavelength), parameters.CALIB_PEAK_WIDTH)
if line_wavelength < xlim[0] or line_wavelength > xlim[1]:
continue
l_index, l_lambdas = find_nearest(lambdas, line_wavelength)
# reject if pixel index is too close to image bounds
if l_index < peak_width or l_index > len(lambdas) - peak_width:
continue
# search for local extrema to detect emission or absorption line
# around pixel index +/- peak_width
line_strategy = np.greater # look for emission line
bgd_strategy = np.less
if not lines.emission_spectrum or line.atmospheric:
line_strategy = np.less # look for absorption line
bgd_strategy = np.greater
index = np.arange(l_index - peak_width, l_index + peak_width, 1).astype(int)
# skip if data is masked with NaN
if np.any(np.isnan(spec_smooth[index])):
continue
extrema = argrelextrema(spec_smooth[index], line_strategy)
if len(extrema[0]) == 0:
continue
peak_index = index[0] + extrema[0][0]
# if several extrema, look for the greatest
if len(extrema[0]) > 1:
if line_strategy == np.greater:
test = -1e20
for m in extrema[0]:
idx = index[0] + m
if spec_smooth[idx] > test:
peak_index = idx
test = spec_smooth[idx]
elif line_strategy == np.less:
test = 1e20
for m in extrema[0]:
idx = index[0] + m
if spec_smooth[idx] < test:
peak_index = idx
test = spec_smooth[idx]
# search for first local minima around the local maximum
# or for first local maxima around the local minimum
# around +/- 3*peak_width
index_inf = peak_index - 1 # extrema on the left
while index_inf > max(0, peak_index - 3 * peak_width):
test_index = np.arange(index_inf, peak_index, 1).astype(int)
minm = argrelextrema(spec_smooth[test_index], bgd_strategy)
if len(minm[0]) > 0:
index_inf = index_inf + minm[0][0]
break
else:
index_inf -= 1
index_sup = peak_index + 1 # extrema on the right
while index_sup < min(len(spec_smooth) - 1, peak_index + 3 * peak_width):
test_index = np.arange(peak_index, index_sup, 1).astype(int)
minm = argrelextrema(spec_smooth[test_index], bgd_strategy)
if len(minm[0]) > 0:
index_sup = peak_index + minm[0][0]
break
else:
index_sup += 1
index_sup = max(index_sup, peak_index + peak_width)
index_inf = min(index_inf, peak_index - peak_width)
# pixel range to consider around the peak, adding bgd_width pixels
# to fit for background around the peak
index = list(np.arange(max(0, index_inf - bgd_width),
min(len(lambdas), index_sup + bgd_width), 1).astype(int))
# skip if data is masked with NaN
if np.any(np.isnan(spec_smooth[index])):
continue
# first guess and bounds to fit the line properties and
# the background with CALIB_BGD_ORDER order polynom
# guess = [0] * bgd_npar + [0.5 * np.max(spec_smooth[index]), lambdas[peak_index],
# 0.5 * (line.width_bounds[0] + line.width_bounds[1])]
bgd_npar = max(parameters.CALIB_BGD_NPARAMS, int(len_index_to_bgd_npar_factor * (index[-1] - index[0])))
bgd_npar_list.append(bgd_npar)
guess = [0] * bgd_npar + [0.5 * np.max(spec_smooth[index]), line_wavelength,
0.5 * (line.width_bounds[0] + line.width_bounds[1])]
if line_strategy == np.less:
# noinspection PyTypeChecker
            guess[bgd_npar] = -0.5 * np.max(spec_smooth[index])  # look for absorption under bgd
# bounds = [[-np.inf] * bgd_npar + [-abs(np.max(spec[index])), lambdas[index_inf], line.width_bounds[0]],
# [np.inf] * bgd_npar + [abs(np.max(spec[index])), lambdas[index_sup], line.width_bounds[1]]]
bounds = [[-np.inf] * bgd_npar + [-abs(np.max(spec[index])), line_wavelength - peak_width / 2,
line.width_bounds[0]],
[np.inf] * bgd_npar + [abs(np.max(spec[index])), line_wavelength + peak_width / 2,
line.width_bounds[1]]]
# gaussian amplitude bounds depend if line is emission/absorption
if line_strategy == np.less:
bounds[1][bgd_npar] = 0 # look for absorption under bgd
else:
bounds[0][bgd_npar] = 0 # look for emission above bgd
peak_index_list.append(peak_index)
index_list.append(index)
lines_list.append(line)
guess_list.append(guess)
bounds_list.append(bounds)
# now gather lines together if pixel index ranges overlap
idx = 0
merges = [[0]]
while idx < len(index_list) - 1:
idx = merges[-1][-1]
if idx == len(index_list) - 1:
break
if index_list[idx + 1][0] > index_list[idx][0]: # increasing order
if index_list[idx][-1] > index_list[idx + 1][0]:
merges[-1].append(idx + 1)
else:
merges.append([idx + 1])
idx += 1
else: # decreasing order
if index_list[idx][0] < index_list[idx + 1][-1]:
merges[-1].append(idx + 1)
else:
merges.append([idx + 1])
idx += 1
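    # Toy illustration: with increasing index ranges [10..30], [25..45], [60..80],
    # the loop above yields merges == [[0, 1], [2]] -- the first two windows
    # overlap (30 > 25) while the third starts a new group.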
# reorder merge list with respect to lambdas in guess list
new_merges = []
for merge in merges:
if len(guess_list) == 0:
continue
tmp_guess = [guess_list[i][-2] for i in merge]
new_merges.append([x for _, x in sorted(zip(tmp_guess, merge))])
# reorder lists with merges
new_peak_index_list = []
new_index_list = []
new_guess_list = []
new_bounds_list = []
new_lines_list = []
for merge in new_merges:
new_peak_index_list.append([])
new_index_list.append([])
new_guess_list.append([])
new_bounds_list.append([[], []])
new_lines_list.append([])
for i in merge:
# add the bgd parameters
bgd_npar = bgd_npar_list[i]
# if i == merge[0]:
# new_guess_list[-1] += guess_list[i][:bgd_npar]
# new_bounds_list[-1][0] += bounds_list[i][0][:bgd_npar]
# new_bounds_list[-1][1] += bounds_list[i][1][:bgd_npar]
# add the gauss parameters
new_peak_index_list[-1].append(peak_index_list[i])
new_index_list[-1] += index_list[i]
new_guess_list[-1] += guess_list[i][bgd_npar:]
new_bounds_list[-1][0] += bounds_list[i][0][bgd_npar:]
new_bounds_list[-1][1] += bounds_list[i][1][bgd_npar:]
new_lines_list[-1].append(lines_list[i])
# set central peak bounds exactly between two close lines
for k in range(len(merge) - 1):
new_bounds_list[-1][0][3 * (k + 1) + 1] = 0.5 * (
new_guess_list[-1][3 * k + 1] + new_guess_list[-1][3 * (k + 1) + 1])
new_bounds_list[-1][1][3 * k + 1] = 0.5 * (
new_guess_list[-1][3 * k + 1] + new_guess_list[-1][3 * (k + 1) + 1]) + 1e-3
# last term is to avoid equalities
# between bounds in some pathological case
# sort pixel indices and remove doublons
new_index_list[-1] = sorted(list(set(new_index_list[-1])))
# fit the line subsets and background
global_chisq = 0
for k in range(len(new_index_list)):
# first guess for the base line with the lateral bands
peak_index = new_peak_index_list[k]
index = new_index_list[k]
guess = new_guess_list[k]
bounds = new_bounds_list[k]
bgd_index = []
if fwhm_func is not None:
peak_width = fwhm_to_peak_width_factor * np.mean(fwhm_func(lambdas[index]))
for i in index:
is_close_to_peak = False
for j in peak_index:
if abs(i - j) < peak_width:
is_close_to_peak = True
break
if not is_close_to_peak:
bgd_index.append(i)
# add background guess and bounds
bgd_npar = max(parameters.CALIB_BGD_ORDER + 1, int(len_index_to_bgd_npar_factor * len(bgd_index)))
parameters.CALIB_BGD_NPARAMS = bgd_npar
guess = [0] * bgd_npar + guess
bounds[0] = [-np.inf] * bgd_npar + bounds[0]
bounds[1] = [np.inf] * bgd_npar + bounds[1]
if len(bgd_index) > 0:
try:
if spec_err is not None:
w = 1. / spec_err[bgd_index]
else:
w = np.ones_like(lambdas[bgd_index])
fit, cov, model = fit_poly1d_legendre(lambdas[bgd_index], spec[bgd_index], order=bgd_npar - 1, w=w)
            except Exception:
if spec_err is not None:
w = 1. / spec_err[index]
else:
w = np.ones_like(lambdas[index])
fit, cov, model = fit_poly1d_legendre(lambdas[index], spec[index], order=bgd_npar - 1, w=w)
else:
if spec_err is not None:
w = 1. / spec_err[index]
else:
w = np.ones_like(lambdas[index])
fit, cov, model = fit_poly1d_legendre(lambdas[index], spec[index], order=bgd_npar - 1, w=w)
# lines.my_logger.warning(f'{bgd_npar} {fit}')
# fig = plt.figure()
# plt.plot(lambdas[index], spec[index])
# plt.plot(lambdas[bgd_index], spec[bgd_index], 'ro')
# x_norm = rescale_x_for_legendre(lambdas[index])
# lines.my_logger.warning(f'tototot {x_norm}')
# plt.plot(lambdas[index], np.polynomial.legendre.legval(x_norm, fit), 'b-')
# plt.plot(lambdas[bgd_index], model, 'b--')
# plt.title(f"{fit}")
# plt.show()
for n in range(bgd_npar):
# guess[n] = getattr(bgd, bgd.param_names[parameters.CALIB_BGD_ORDER - n]).value
guess[n] = fit[n]
b = abs(baseline_prior * guess[n])
if np.isclose(b, 0, rtol=1e-2 * float(np.mean(spec_smooth[bgd_index]))):
b = baseline_prior * np.std(spec_smooth[bgd_index])
if np.isclose(b, 0, rtol=1e-2 * float(np.mean(spec_smooth[bgd_index]))):
b = np.inf
bounds[0][n] = guess[n] - b
bounds[1][n] = guess[n] + b
for j in range(len(new_lines_list[k])):
idx = new_peak_index_list[k][j]
x_norm = rescale_x_for_legendre(lambdas[idx])
guess[bgd_npar + 3 * j] = np.sign(guess[bgd_npar + 3 * j]) * abs(
spec_smooth[idx] - np.polynomial.legendre.legval(x_norm, guess[:bgd_npar]))
if np.sign(guess[bgd_npar + 3 * j]) < 0: # absorption
bounds[0][bgd_npar + 3 * j] = 2 * guess[bgd_npar + 3 * j]
else: # emission
bounds[1][bgd_npar + 3 * j] = 2 * guess[bgd_npar + 3 * j]
# fit local extrema with a multigaussian + CALIB_BGD_ORDER polynom
# account for the spectrum uncertainties if provided
sigma = None
if spec_err is not None:
sigma = spec_err[index]
if cov_matrix is not None:
sigma = cov_matrix[index, index]
# my_logger.warning(f'\n{guess} {np.mean(spec[bgd_index])} {np.std(spec[bgd_index])}')
popt, pcov = fit_multigauss_and_bgd(lambdas[index], spec[index], guess=guess, bounds=bounds, sigma=sigma)
# noise level defined as the std of the residuals if no error
noise_level = np.std(spec[index] - multigauss_and_bgd(lambdas[index], *popt))
# otherwise mean of error bars of bgd lateral bands
if sigma is not None:
chisq = np.sum((multigauss_and_bgd(lambdas[index], *popt) - spec[index]) ** 2 / (sigma * sigma))
else:
chisq = np.sum((multigauss_and_bgd(lambdas[index], *popt) - spec[index]) ** 2)
chisq /= len(index)
global_chisq += chisq
if spec_err is not None:
noise_level = np.sqrt(np.mean(spec_err[index] ** 2))
for j in range(len(new_lines_list[k])):
line = new_lines_list[k][j]
peak_pos = popt[bgd_npar + 3 * j + 1]
# FWHM
FWHM = np.abs(popt[bgd_npar + 3 * j + 2]) * 2.355
# SNR computation
# signal_level = popt[bgd_npar+3*j]
signal_level = popt[
bgd_npar + 3 * j] # multigauss_and_bgd(peak_pos, *popt) - np.polyval(popt[:bgd_npar], peak_pos)
snr = np.abs(signal_level / noise_level)
# save fit results
line.fitted = True
line.fit_index = index
line.fit_lambdas = lambdas[index]
x_norm = rescale_x_for_legendre(lambdas[index])
x_step = 0.1 # nm
            x_int = np.arange(max(np.min(lambdas), peak_pos - 5 * np.abs(popt[bgd_npar + 3 * j + 2])),
                              min(np.max(lambdas), peak_pos + 5 * np.abs(popt[bgd_npar + 3 * j + 2])), x_step)
import numpy as np
import matplotlib.pyplot as plt
import auralib as aura
from numpy.fft import fftfreq, fft, ifft, fftshift, ifftshift
from scipy.interpolate import interp1d
import scipy as sp
def get_traces_for_matching_filter(basefile, monfile, step):
buf1 = aura.segy.Segy(basefile)
buf2 = aura.segy.Segy(monfile)
dt = buf1.bhead['samp_rate']*1e-6
nsamp = buf1.bhead['num_samp']
twtr = np.arange(nsamp)*dt
tdata1r = []
tdata2r = []
trcnum = []
for i in np.arange(0, buf1.num_traces, step):
tmp1 = buf1.read_tdata(i)
if np.mean(np.abs(tmp1)) > 0.0:
tmp2 = buf2.read_tdata(i)
if np.mean(np.abs(tmp2)) > 0.0:
tdata1r.append(tmp1)
tdata2r.append(tmp2)
trcnum.append(i)
tdata1r = np.array(tdata1r)
tdata2r = np.array(tdata2r)
    trcnum = np.array(trcnum)
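# Hedged usage sketch (paths are placeholders; assumes the function returns
# tdata1r, tdata2r and trcnum):
#   tdata_base, tdata_mon, trcnum = get_traces_for_matching_filter(
#       'base.sgy', 'monitor.sgy', step=10)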
"""
Free-flyer Gripper Grasping. For model-free RL learning of trajectory to grasp an object.
*0*o798gaWoJ
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import scipy
from scipy.integrate import odeint
def soft_abs(x, alpha=1.0, d=0):
z = np.sqrt(alpha**2 + x**2)
if d == 0:
return z - alpha
if d == 1:
return x/z
if d == 2:
return alpha**2 / z**3
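# Worked values at alpha = 1: soft_abs(3.0) = sqrt(1 + 3**2) - 1 ~ 2.162, and the
# first derivative soft_abs(3.0, d=1) = 3 / sqrt(10) ~ 0.949; as alpha -> 0 these
# approach |x| and sign(x) respectively.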
def vector_cross(x,y):
"""
Does cross product of two 3x1 np arrays.
Normal numpy cross product only takes vectors.
"""
assert x.shape[0] == 3
assert y.shape[0] == 3
return np.expand_dims(np.cross(x[:,0],y[:,0]), axis=-1)
def vector_dot(x,y):
"""
Does dot product of two 3x1 np arrays.
Normal numpy dot product only takes vectors.
"""
assert x.shape[0] == 3
assert y.shape[0] == 3
return np.dot(x[:,0],y[:,0])
def norm_angle(th):
    # wrap the angle into [-pi, pi)
    while th > math.pi:
        th -= 2 * math.pi
    while th < -math.pi:
        th += 2 * math.pi
    return th
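# e.g. norm_angle(1.5 * math.pi) -> -0.5 * math.pi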
logger = logging.getLogger(__name__)
class GraspEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30
}
def __init__(self,costf='simple',randomize_params=False,rand_init=True):
self.s_dim = 12 # state: xs, ys, ths, vxs, vys, vths, xo, yo, tho, vxo, vyo, vtho
self.a_dim = 3
self.costf = 'simple'
self.randomize_params = randomize_params
self.rand_init = rand_init
#spacecraft params:
self.ms = 6700. # SSL-1300 bus
        self.Js = 1/12 * 6700 * (5**2 + 5**2)  # cube
self.rs = 2.5
self.Ls = 1.5
#object params:
self.mo_nom = 1973. # Landsat-7 bus
        self.Jo_nom = 1/12 * self.mo_nom * (4**2 + 4**2)  # cube
self.ro = 1.5
self.Lo = 1.5
#interface params:
self.kx = 0.5
self.ky = 0.5
self.kth = 0.5
self.dx = 0.2
self.dy = 0.2
self.dth = 0.25
self.dt = 0.1
# Randomization limits
self.panel1_len_nom = 5.
self.panel1_angle_nom = 2*math.pi/3.
self.panel2_len_nom = 5.
self.panel2_angle_nom = -2*math.pi/3.
# State + action bounds
# state: xs, ys, ths, vxs, vys, vths, xo, yo, tho, vxo, vyo, vtho
self.x_upper = 10.
self.x_lower = -10.
self.y_upper = self.x_upper
self.y_lower = self.x_lower
self.v_limit = 0.5 #vel limit for all directions
self.angle_limit = math.pi
self.angle_deriv_limit = math.pi/16.
self.f_upper = 5. # Aerojet Rocketdyne MR-111
self.f_lower = 0.
self.M_lim = 0.075 # Rockwell Collins RSI 4-75
# -- simple cost terms
self.simple_dist_cost = 0.1
self.simple_angle_cost = 0.1
self.simple_ang_vel_cost = 0.05
self.simple_vel_cost = 0.2
self.simple_f1_cost = 0.5
self.simple_f2_cost = 0.5
self.simple_m_cost = 0.7
# --
# I think this is from CM-gripper to CM-object
self.offset_distance = self.rs + self.ro + self.Ls + self.Lo
# define default initial state (note: not used if rand_init=True)
self.start_state = np.zeros(self.s_dim)
self.start_state[0] = -5.
self.start_state[6] = 5.
# define goal region, based on grasping envelope from ICRA 2016 paper
self.goal_eps_norm = 0.2 # contact
self.goal_eps_tan = 0.1 # offset
self.goal_eps_ang = math.pi/4.
self.goal_eps_vel_lower = 0.2
self.goal_eps_vel_upper = 0.8
self.goal_eps_ang_vel = math.pi
high_ob = [self.x_upper,
self.y_upper,
self.angle_limit,
self.v_limit,
self.v_limit,
self.angle_deriv_limit,
self.x_upper,
self.y_upper,
self.angle_limit,
self.v_limit,
self.v_limit,
self.angle_deriv_limit]
low_ob = [self.x_lower,
self.y_lower,
-self.angle_limit,
-self.v_limit,
-self.v_limit,
-self.angle_deriv_limit,
self.x_lower,
self.y_lower,
-self.angle_limit,
-self.v_limit,
-self.v_limit,
-self.angle_deriv_limit]
high_state = high_ob
low_state = low_ob
high_state = np.array(high_state)
low_state = np.array(low_state)
high_obsv = np.array(high_ob)
low_obsv = np.array(low_ob)
high_actions = np.array([self.f_upper,
self.f_upper,
self.M_lim])
low_actions = np.array([-self.f_upper,
-self.f_upper,
-self.M_lim])
self.action_space = spaces.Box(low=low_actions, high=high_actions)
self.state_space = spaces.Box(low=low_state, high=high_state)
self.observation_space = self.state_space #spaces.Box(low=low_obsv, high=high_obsv)
self.seed(2017)
self.viewer = None
def get_ac_sample(self):
thrust1 = np.random.uniform(-self.f_upper,self.f_upper)*0.1
thrust2 = np.random.uniform(-self.f_upper,self.f_upper)*0.1
m = np.random.uniform(-self.M_lim,self.M_lim)*0.1
return [thrust1,thrust2,m]
def get_ob_sample(self):
# currently setting random state, not doing trajs
z = self.state_space.sample()
# train always in the same-ish direction
z[0] = np.random.uniform(-5, -2)
z[1] = np.random.uniform(-5, -2)
z[2] = np.random.uniform(-math.pi, math.pi)
# start at zero velocity
z[3] = 0 #np.random.uniform(-0.1,0.1)
z[4] = 0 #np.random.uniform(-0.1,0.1)
z[5] = 0
z[6] = np.random.uniform(2,5)
z[7] = np.random.uniform(2,5)
z[8] = 0 # doesn't matter
z[9] = np.random.uniform(-0.1,0.1)
z[10] = np.random.uniform(-0.1,0.1)
z[11] = 0 # doesn't matter
# # keep moving object until they're not on top of each other
# while np.sqrt((z[6]-z[0])**2 + (z[7]-z[1])**2) < 1.2*(self.ro+self.rs):
# z[6] = np.random.uniform(self.x_lower, self.x_upper)
# z[7] = np.random.uniform(self.y_lower, self.y_upper)
return z
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def plot_quad_in_map(self):
# TODO
pass
def _in_obst(self, state):
# TODO
return False
def _get_obs(self, state):
return state
def _gen_state_rew(self,state):
# TODO
pass
def _gen_control_rew(self,u):
# TODO
pass
def _goal_dist(self, state):
xs, ys, ths, vxs, vys, vths, xo, yo, tho, vxo, vyo, vtho = state
        s2o = np.array([xo-xs, yo-ys])
xs_hat = np.array([[np.cos(ths)],[np.sin(ths)]])
ys_hat = np.array([[-np.sin(ths)],[np.cos(ths)]])
        norm_dist_to_object = soft_abs(np.dot(s2o, xs_hat))  # (source truncated here)
import pickle
import argparse
import os.path
from mutual_information.mf_random import MutualInfoRandomizer
import logging.config
import numpy as np
import math
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'log_config.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger(__name__)
def main():
HOME_DIR = os.path.expanduser('~')
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(dest='command')
simulate_parser = subparser.add_parser('simulate',
help='create empirical distributions')
simulate_parser.add_argument('-i', '--input', help='input file path',
action='store', dest='input_path')
simulate_parser.add_argument('-o', '--out', help='output directory',
action='store', dest='out_dir', default=HOME_DIR)
simulate_parser.add_argument('-n', '--n_per_simulation',
help='sample size per simulation. ignore if to use '
'the same size in original data',
dest='n_per_run', type=int, default=None)
simulate_parser.add_argument('-N', '--N_SIMULATIONS', help='total simulations',
dest='N_SIMULATIONS', type=int, default=1000)
simulate_parser.add_argument('-v', '--verbose', help='print messages',
dest='verbose', action='store_true',
default=False)
simulate_parser.add_argument('-job_id', help='job array id (PBS_ARRAYID)',
dest='job_id', type=int, default=None)
simulate_parser.add_argument('-cpu', help='specify the number of available cpu',
default=None, type=int, dest='cpu')
simulate_parser.add_argument('-disease', help='specify if only to analyze such disease',
default=[], dest='disease_of_interest',
type=str)
simulate_parser.set_defaults(func=simulate)
estimate_parser = subparser.add_parser('estimate',
help='estimate p value from empirical distributions')
estimate_parser.add_argument('-i', '--input', help='input file path',
action='store', dest='input_path')
estimate_parser.add_argument('-dist', '--dist_path',
help='empirical distribution path',
action='store', dest='dist_path')
estimate_parser.add_argument('-o', '--ouput', help='output dir',
action='store', dest='out_dir')
estimate_parser.add_argument('-disease', help='specify if only to analyze such disease',
default=[], dest='disease_of_interest',
type=str)
estimate_parser.set_defaults(func=estimate)
args = parser.parse_args()
if args.command is None:
parser.print_help()
else:
args.func(args)
def simulate(args):
input_path = args.input_path
per_simulation = args.n_per_run
simulations = args.N_SIMULATIONS
verbose = args.verbose
dir = args.out_dir
cpu = args.cpu
job_id = args.job_id
disease_of_interest = args.disease_of_interest
with open(input_path, 'rb') as in_file:
disease_synergy_map = pickle.load(in_file)
logger.info('number of diseases to run simulations for {}'.format(
len(disease_synergy_map)))
if job_id is None:
job_suffix = ''
else:
job_suffix = '_' + str(job_id)
for disease, synergy in disease_synergy_map.items():
        # an empty disease_of_interest means "no filter"
        if disease_of_interest and disease not in disease_of_interest:
            continue
randmizer = MutualInfoRandomizer(synergy)
if verbose:
print('start calculating p values for {}'.format(disease))
randmizer.simulate(per_simulation, simulations, cpu, job_id)
# p = randmizer.p_value()
# p_filepath = os.path.join(dir, disease + '_p_value_.obj')
# with open(p_filepath, 'wb') as f:
# pickle.dump(p, file=f, protocol=2)
distribution_file_path = os.path.join(dir, disease + job_suffix +
'_distribution.obj')
with open(distribution_file_path, 'wb') as f2:
pickle.dump(randmizer.empirical_distribution, file=f2, protocol=2)
if verbose:
print('saved current batch of simulations {} for {}'.format(
job_id, disease))
def estimate(args):
input_path = args.input_path
dist_path = args.dist_path
out_path = args.out_dir
disease_of_interest = args.disease_of_interest
print(args)
with open(input_path, 'rb') as in_file:
mf_map = pickle.load(in_file)
logger.info('number of diseases to run simulations for {}'.format(
len(mf_map)))
for disease, summary_statistics in mf_map.items():
        if disease_of_interest and disease not in disease_of_interest:
            continue
randmizer = MutualInfoRandomizer(summary_statistics)
empirical_distribution = load_distribution(dist_path, disease)
# serialize_empirical_distributions(empirical_distribution['synergy'],
# os.path.join(out_path, disease +
# '_empirical_distribution_subset.obj'))
randmizer.empirical_distribution = empirical_distribution
p = randmizer.p_values()
p_path = os.path.join(out_path, 'p_value_{}.obj'.format(disease_of_interest))
with open(p_path, 'wb') as f:
pickle.dump(p, f, protocol=2)
return p
def load_distribution(dir, disease_prefix):
"""
Collect individual distribution profiles
:param dir:
:param disease:
:return:
"""
simulations = []
for i in np.arange(5000):
path = os.path.join(dir, disease_prefix + '_' + str(i) +
'_distribution.obj')
if os.path.exists(path):
with open(path, 'rb') as f:
try:
simulation = pickle.load(f)
simulations.append(simulation)
except:
pass
empirical_distributions = dict()
empirical_distributions['mf_XY_omit_z'] = \
np.concatenate([res['mf_XY_omit_z'] for res in simulations], axis=-1)
empirical_distributions['mf_Xz'] = \
np.concatenate([res['mf_Xz'] for res in simulations], axis=-1)
empirical_distributions['mf_Yz'] = \
np.concatenate([res['mf_Yz'] for res in simulations], axis=-1)
empirical_distributions['mf_XY_z'] = \
np.concatenate([res['mf_XY_z'] for res in simulations], axis=-1)
empirical_distributions['mf_XY_given_z'] = \
np.concatenate([res['mf_XY_given_z'] for res in simulations], axis=-1)
empirical_distributions['synergy'] = \
np.concatenate([res['synergy'] for res in simulations], axis=-1)
return empirical_distributions
def serialize_empirical_distributions(distribution, path):
M1 = distribution.shape[0]
M2 = distribution.shape[1]
N = distribution.shape[2]
sampling_1d_size = min([M1, M2, 5])
i_index = np.random.choice(np.arange(M1), sampling_1d_size, replace=False)
    j_index = np.random.choice(np.arange(M2), sampling_1d_size, replace=False)
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""
Resolution calculation using the Popovici method. Based on
neutrons.instruments.tas.tasres from the neutrons Python package, compiled by
<NAME>.
"""
import os
import multiprocessing
from numpy import pi, radians, degrees, sin, cos, tan, arcsin, arccos, \
arctan2, abs, sqrt, real, matrix, diag, cross, dot, array, arange, \
zeros, concatenate, reshape, delete, loadtxt
from numpy.random import randn
from numpy.linalg import inv, det, eig, norm
class unitcell(object):
"""
Class which models a crystallographic unit cell from given lattice
parameters and angles. Further it provides functions to make basic
calculations with the lattice vectors in the crystallographic and in the
standard cartesian coordinate system.
"""
def __init__(self, a, b, c, alpha, beta, gamma):
"""
Instantiates the class object: necessary input parameters are the
lattice constants a,b,c in Angstrom and the angles between the
crystallographic axes, namely alpha, beta, gamma in degrees.
"""
# unit cell parameters
self.a = float(a)
self.b = float(b)
self.c = float(c)
self.alpha = radians(alpha)
self.beta = radians(beta)
self.gamma = radians(gamma)
# often used values
cosa = cos(self.alpha)
cosb = cos(self.beta)
cosg = cos(self.gamma)
sing = sin(self.gamma)
# conversion matrices
# (s. International Tables of Crystallography, Volume B, 2nd Edition, p. 360)
# phi is a needed constant
phi = sqrt(1 - cosa**2 - cosb**2 - cosg**2 + 2*cosa*cosb*cosg)
# from crystallographic (x) to cartesian setting (X) with first component
# parallel to a and third to c* (reciprocal axis)
# X = Mx, (x in fractional units=>dimensionless, X in dimension of length)
self.crys2cartMat = matrix(
[[a, b * cosg, c * cosb],
[0, b * sing, c * (cosa - cosb*cosg)/sing],
[0, 0, c * phi/sing]]
)
# from cartesian (X) to crystallographic setting (x) with first
# component parallel to a and third to c* (reciprocal axis)
# x = M^(-1)X, (x in fractional units=>dimensionless, X in dimension of length)
self.cart2crysMat = matrix(
[[1./a, -1./(a*tan(self.gamma)), (cosa*cosg-cosb)/(a*phi*sing)],
[0, 1/(b*sing), (cosb*cosg-cosa)/(b*phi*sing)],
[0, 0, sing/(c*phi)]]
)
# ----- Real space lattice vectors
self.a_vec = self.crys2cart(1, 0, 0).transpose()
self.b_vec = self.crys2cart(0, 1, 0).transpose()
self.c_vec = self.crys2cart(0, 0, 1).transpose()
# this is needed if you only want to roatate from the cartesion to the
# crystal system without changing the length
self.crys2cartUnit = concatenate((self.a_vec/norm(self.a_vec),
self.b_vec/norm(self.b_vec),
self.c_vec/norm(self.c_vec)))
self.cart2crysUnit = self.crys2cartUnit.I
# Volume of unit cell
self.V = dot(array(self.a_vec)[0],
cross(array(self.b_vec)[0], array(self.c_vec)[0]))
# ----- Reciprocal space lattice basis vectors
self.a_star_vec = 2*pi*cross(array(self.b_vec)[0], array(self.c_vec)[0])/self.V
self.b_star_vec = 2*pi*cross(array(self.c_vec)[0], array(self.a_vec)[0])/self.V
self.c_star_vec = 2*pi*cross(array(self.a_vec)[0], array(self.b_vec)[0])/self.V
# ----- Reciprocal lattice index (r.l.u.) to cartesian matrix
self.rlu2cartMat = concatenate((matrix(self.a_star_vec).T,
matrix(self.b_star_vec).T,
matrix(self.c_star_vec).T), 1)
self.cart2rluMat = inv(self.rlu2cartMat)
# ----- Reciprocal lattice constants; note that there is a factor
# 2*pi difference to CrysFML definition
self.a_star = sqrt(dot(self.a_star_vec, self.a_star_vec))
self.b_star = sqrt(dot(self.b_star_vec, self.b_star_vec))
self.c_star = sqrt(dot(self.c_star_vec, self.c_star_vec))
# ----- Reciprocal lattice angles
self.ralpha = arccos(dot(self.b_star_vec, self.c_star_vec)/(self.b_star*self.c_star))
self.rbeta = arccos(dot(self.a_star_vec, self.c_star_vec)/(self.a_star*self.c_star))
self.rgamma = arccos(dot(self.b_star_vec, self.a_star_vec)/(self.b_star*self.a_star))
# ----- Reciprocal cell volume
self.VR = dot(self.a_star_vec, cross(self.b_star_vec, self.c_star_vec))
# ----- Review this array, I think it has to be transposed to be useful
self.Q2c = matrix([self.a_star_vec, self.b_star_vec, self.c_star_vec])
# ----- Metric tensors
self.GD = zeros([3, 3])
self.GR = zeros([3, 3])
# real space
self.GD[0, 0] = self.a**2
self.GD[1, 1] = self.b**2
self.GD[2, 2] = self.c**2
        self.GD[0, 1] = self.a * self.b * cos(self.gamma)
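        # The source is truncated here; the standard symmetric completion of the
        # real-space metric tensor (assumed, not from the original) would be
        #   self.GD[0, 2] = self.a * self.c * cos(self.beta)
        #   self.GD[1, 2] = self.b * self.c * cos(self.alpha)
        # together with the mirrored entries GD[1, 0], GD[2, 0], GD[2, 1].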
'''
this is the KPITU (Knee Point Identification Based on Trade-Off Utility) algorithm
'''
import numpy as np
import copy
import math as m
class solution(object):
def __init__(self, m):
self.index = -1
self.objective = np.zeros([1, m])
self.neighbor = []
self.contribution = -1
self.repoints = None
self.left = -1
class reference_point(object):
def __init__(self):
self.direction = None
self.neighbor = []
self.associate = []
def transfer(A, B):
if np.sum(A.objective - B.objective) > 0:
return 1
else:
return 0
def select(A, B):
return np.sum(A.objective - B.objective)
def Associate(p, w):
obj_mat = np.asarray([i.objective for i in p]).T
w_mat = np.asarray([i.direction for i in w])
d_mat = np.dot(w_mat, obj_mat)
for i in range(len(w_mat)):
d_mat[i, :] = d_mat[i, :] / np.sqrt(sum(w_mat[i, :]**2))
for i in range(len(obj_mat[0, :])):
length2 = sum(obj_mat[:, i]**2)
for j in range(len(d_mat[:, i])):
d_2 = length2-d_mat[j, i]**2
if d_2 < 0:
d_mat[j, i] = 0
else:
d_mat[j, i] = d_2
w[np.argmin(d_mat[:, i])].associate.append(p[i])
p[i].repoints = w[np.argmin(d_mat[:, i])]
return p, w
def main_function(data, K):
points = copy.copy(data)
num = len(points)
dim = len(points[0, :])
for i in range(dim):
Min = min(points[:, i])
Max = max(points[:, i])
if Max != Min:
points[:, i] = (points[:, i] - Min) / (Max - Min)
else:
points[:, i] = Min
div = 0
H = 0
while H < num:
div += 1
H = m.factorial(div + dim - 1) / (m.factorial(div) * m.factorial(dim - 1))
div -= 1
    div = min(div, 20)
list_range = [i / div for i in range(div + 1)]
direction = []
def w_generator(now_dim, now_sum, now_array):
if now_dim == 1:
for i in list_range:
temp_array = copy.copy(now_array)
if round(i + now_sum - 1, 5) == 0:
temp_array.append(i)
direction.append(temp_array)
else:
for i in list_range:
temp_array = copy.copy(now_array)
if round(i + now_sum - 1, 5) <= 0:
temp_array.append(i)
w_generator(now_dim - 1, now_sum + i, temp_array)
w_generator(dim, 0, [])
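    # w_generator enumerates the Das-Dennis simplex-lattice weight vectors: all
    # non-negative vectors with components on a grid of spacing 1/div that sum
    # to 1. E.g. dim=2, div=2 yields [0, 1], [0.5, 0.5] and [1, 0].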
direction = np.asarray(direction)
Repoints = [reference_point() for i in range(len(direction))]
for i in range(len(direction)):
Repoints[i].direction = direction[i, :]
distance_list = np.sum((direction - direction[i, :] * np.ones(direction.shape)) ** 2, axis = 1)
distance_sort = np.argsort(distance_list)
temp_min_d = distance_list[distance_sort[1]]
current_index = 1
while round(temp_min_d - distance_list[distance_sort[current_index]], 5) == 0:
Repoints[i].neighbor.append(Repoints[distance_sort[current_index]])
current_index += 1
P = [solution(dim) for i in range(num)]
for i in range(num):
P[i].index = i
P[i].objective = points[i, :]
P, Repoints = Associate(P, Repoints)
for im in range(num):
P[im].neighbor = []
for i1 in P[im].repoints.associate:
if i1 is not P[im]:
if i1 not in P[im].neighbor:
P[im].neighbor.append(i1)
for i2 in P[im].repoints.neighbor:
for i3 in i2.associate:
if i3 not in P[im].neighbor:
P[im].neighbor.append(i3)
Current = P
Internal = []
Peripheral = []
Reserve = []
for j in Current:
        for mm in j.neighbor:
            if transfer(j, mm) == 1:
                j.left = 1
if j.left != 1:
if min(j.repoints.direction) == 0 and dim not in [5, 8, 10]:
Peripheral.append(j)
else:
Internal.append(j)
else:
Reserve.append(j)
Peripheral_index = np.asarray([j.index for j in Peripheral])
Internal_index = np.asarray([j.index for j in Internal])
if len(Internal_index) == 0:
if len(Peripheral_index) < K:
if dim == 8 or dim == 10:
add_index = []
neighbor_num = len(Peripheral_index)
for i in Peripheral:
neighbor_num += len(i.neighbor)
for j in i.neighbor:
add_index.append(j.index)
if neighbor_num <= K:
Internal_index = np.hstack((Peripheral_index, np.asarray(add_index)))
else:
gain_list_all = []
for i in Peripheral:
gain_value = 0
gain_list = []
for j in i.neighbor:
for k in i.neighbor:
gain_value += select(j, k)
gain_list.append(gain_value)
gain_sort = np.argsort(gain_list)
for j in range(len(gain_sort)):
gain_list[gain_sort[j]] = len(gain_sort) - j
gain_list_all = gain_list_all + gain_list
gain_sort_all = np.argsort(gain_list_all)
select_index = [add_index[gain_sort_all[k]] for k in range(K-len(Peripheral_index))]
Internal_index = np.hstack((Peripheral_index, np.asarray(select_index)))
else:
for i in Peripheral:
gain_value = 0
gain_list = []
for j in i.neighbor:
for k in i.neighbor:
gain_value += select(j, k)
gain_list.append(gain_value)
gain_list = np.asarray(gain_list)
SK = np.argsort(gain_list)
SK = list(SK[:m.ceil(K / len(Internal) - 1)])
for sel_index in SK:
                        Peripheral_index = np.hstack((Peripheral_index, i.neighbor[sel_index].index))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import warnings
from orangecontrib.wonder.fit.parameters.measured_data.phase import Phase
from orangecontrib.wonder.fit.parameters.initialization.fft_parameters import FFTTypes
from orangecontrib.wonder.fit.parameters.instrument.zero_error import ZeroError
from orangecontrib.wonder.fit.parameters.instrument.lab6_tan_correction import Lab6TanCorrection
from orangecontrib.wonder.fit.parameters.instrument.caglioti import Caglioti
from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters
from orangecontrib.wonder.fit.parameters.instrument.speciment_displacement import SpecimenDisplacement
from orangecontrib.wonder.fit.parameters.thermal.thermal_parameters import ThermalParameters
from orangecontrib.wonder.fit.parameters.instrument.background_parameters import ChebyshevBackground, ExpDecayBackground
from orangecontrib.wonder.fit.parameters.microstructure.strain import InvariantPAH, WarrenModel, KrivoglazWilkensModel
from orangecontrib.wonder.fit.parameters.measured_data.phase_gsasii import GSASIIPhase
from orangecontrib.wonder.fit.functions.gsasii_functions import gsasii_intensity_factor
from orangecontrib.wonder.fit.parameters.additional.pseudo_voigt_peak import SpuriousPeaks
from orangecontrib.wonder.fit.functions.chemical_formula_parser import ChemicalFormulaParser
from orangecontrib.wonder.util.fit_utilities import Utilities, Symmetry
warnings.filterwarnings("ignore", category=RuntimeWarning)
class Distribution:
DELTA = "delta"
LOGNORMAL = "lognormal"
GAMMA = "gamma"
YORK = "york"
@classmethod
def tuple(cls):
return [cls.DELTA, cls.LOGNORMAL, cls.GAMMA, cls.YORK]
class Shape:
NONE = "none"
SPHERE = "sphere"
CUBE = "cube"
TETRAHEDRON = "tetrahedron"
OCTAHEDRON = "octahedron"
CYLINDER = "cylinder"
WULFF = "wulff solid"
@classmethod
def tuple(cls):
return [cls.NONE, cls.SPHERE, cls.CUBE, cls.TETRAHEDRON, cls.OCTAHEDRON, cls.CYLINDER, cls.WULFF]
class WulffCubeFace:
TRIANGULAR = "triangular"
HEXAGONAL = "hexagonal"
@classmethod
def tuple(cls):
return [cls.TRIANGULAR, cls.HEXAGONAL]
class Normalization:
NORMALIZE_TO_N = 0
NORMALIZE_TO_N2 = 1
@classmethod
def tuple(cls):
return ["to N", "to N\u00b2"]
def __H_invariant_square(h, k, l):
numerator = (h * h * k * k + k * k * l * l + l * l * h * h)
denominator = (h**2 + k**2 + l**2)**2
return numerator / denominator
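# __H_invariant_square is the cubic anisotropy invariant
# H^2 = (h^2 k^2 + k^2 l^2 + l^2 h^2) / (h^2 + k^2 + l^2)^2,
# e.g. H^2 = 0 for (h00) reflections and 1/3 for (hhh) reflections.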
def __merge_functions(list_of_pairs, s):
# x step must be the same for all functions
intensity = numpy.zeros(len(s))
for pair_index in range(list_of_pairs.shape[0]):
intensity += numpy.interp(s, list_of_pairs[pair_index, 0], list_of_pairs[pair_index, 1])
return intensity
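# Each pair in list_of_pairs is an (s-grid, intensity) couple; the components
# are resampled onto the common grid `s` by linear interpolation and summed, so
# the per-peak grids may differ as long as they cover the range of `s`.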
#################################################
#
# FIT FUNCTION
#
#################################################
def fit_function_direct(twotheta, fit_global_parameters, diffraction_pattern_index = 0):
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
wavelength = incident_radiation.wavelength.value
I = fit_function_reciprocal(Utilities.s(0.5*numpy.radians(twotheta), wavelength),
fit_global_parameters,
diffraction_pattern_index)
# POLARIZATION FACTOR --------------------------------------------------------------------------------------
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_polarization_factor:
twotheta_mono = polarization_parameters.twotheta_mono
I *= polarization_factor(numpy.radians(twotheta),
None if twotheta_mono is None else numpy.radians(twotheta_mono),
polarization_parameters.degree_of_polarization,
polarization_parameters.beampath)
# ADD BACKGROUNDS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.background_parameters is None:
for key in fit_global_parameters.background_parameters.keys():
background_parameters = fit_global_parameters.get_background_parameters_item(key, diffraction_pattern_index)
if not background_parameters is None:
if key == ChebyshevBackground.__name__:
parameters=[background_parameters.c0.value,
background_parameters.c1.value,
background_parameters.c2.value,
background_parameters.c3.value,
background_parameters.c4.value,
background_parameters.c5.value,
background_parameters.c6.value,
background_parameters.c7.value,
background_parameters.c8.value,
background_parameters.c9.value]
add_chebyshev_background(twotheta, I, parameters)
elif key == ExpDecayBackground.__name__:
add_expdecay_background(twotheta,
I,
parameters=[background_parameters.a0.value,
background_parameters.b0.value,
background_parameters.a1.value,
background_parameters.b1.value,
background_parameters.a2.value,
background_parameters.b2.value])
# ADD PSEUDO VOIGTS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.additional_parameters is None:
for key in fit_global_parameters.additional_parameters.keys():
additional_parameters = fit_global_parameters.get_additional_parameters_item(key, diffraction_pattern_index)
if not additional_parameters is None:
if key == SpuriousPeaks.__name__:
add_pseudo_voigt_peaks(twotheta, I, additional_parameters)
return I
def fit_function_reciprocal(s, fit_global_parameters, diffraction_pattern_index = 0):
line_profile = fit_global_parameters.measured_dataset.get_line_profile(diffraction_pattern_index)
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
# CONSTRUCTION OF EACH SEPARATE PEAK ---------------------------------------------------------------------------
phases_number = fit_global_parameters.measured_dataset.get_phases_number()
separated_phases_intensities = numpy.full((phases_number, 2), None)
for phase_index in range(phases_number):
phase = fit_global_parameters.measured_dataset.get_phase(phase_index)
if not Phase.is_cube(phase.symmetry): raise ValueError("Only Cubic structures are supported by fit")
reflections_number = line_profile.get_reflections_number(phase_index)
separated_peaks_functions = numpy.full((reflections_number, 2), None)
for reflection_index in range(reflections_number):
if isinstance(phase, GSASIIPhase):
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=line_profile.get_additional_parameters_of_phase(phase_index))
else:
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters)
separated_peaks_functions[reflection_index, 0] = s_analytical
separated_peaks_functions[reflection_index, 1] = intensity_analytical
# INTERPOLATION ONTO ORIGINAL S VALUES -------------------------------------------------------------------------
intensity_phase = __merge_functions(separated_peaks_functions, s)
# ADD SAXS
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.DELTA and size_parameters.add_saxs:
                if not phase.use_structure: raise NotImplementedError("SAXS is available only when the structural model is active")
intensity_phase += saxs(s,
size_parameters.mu.value,
phase.a.value,
phase.formula,
phase.symmetry,
size_parameters.normalize_to)
# ADD DEBYE-WALLER FACTOR --------------------------------------------------------------------------------------
thermal_parameters = fit_global_parameters.get_thermal_parameters_item(ThermalParameters.__name__, diffraction_pattern_index)
if not thermal_parameters is None:
debye_waller_factor = thermal_parameters.get_debye_waller_factor(phase_index)
if not debye_waller_factor is None:
intensity_phase *= debye_waller(s, debye_waller_factor.value*0.01) # from A-2 to nm-2
separated_phases_intensities[phase_index, 0] = s
separated_phases_intensities[phase_index, 1] = intensity_phase
intensity = __merge_functions(separated_phases_intensities, s)
if not incident_radiation.is_single_wavelength:
principal_wavelength = incident_radiation.wavelength
intensity_scaled = intensity*incident_radiation.get_principal_wavelenght_weight()
separated_secondary_intensities = numpy.full((len(incident_radiation.secondary_wavelengths), 2), None)
secondary_index = 0
for secondary_wavelength, secondary_wavelength_weigth in zip(incident_radiation.secondary_wavelengths,
incident_radiation.secondary_wavelengths_weights):
s_secondary = s * secondary_wavelength.value/principal_wavelength.value
separated_secondary_intensities[secondary_index, 0] = s_secondary
separated_secondary_intensities[secondary_index, 1] = intensity*secondary_wavelength_weigth.value
secondary_index += 1
intensity = intensity_scaled + __merge_functions(separated_secondary_intensities, s)
return intensity
#################################################
# FOURIER FUNCTIONS
#################################################
class FourierTranformFactory:
@classmethod
def get_fourier_transform(cls, type=FFTTypes.REAL_ONLY):
if type == FFTTypes.REAL_ONLY:
return FourierTransformRealOnly
elif type == FFTTypes.FULL:
return FourierTransformFull
else:
raise ValueError("Type not recognized")
class FourierTransform:
@classmethod
def fft(cls, f, n_steps, dL):
raise NotImplementedError()
@classmethod
def get_empty_fft(cls, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
I = numpy.zeros(len(s))
I[int(len(s)/2)] = 1.0
return s, I
class FourierTransformRealOnly(FourierTransform):
@classmethod
def _real_absolute_fourier(cls, y):
return numpy.fft.fftshift(numpy.abs(numpy.real(numpy.fft.fft(y))))
@classmethod
def _fft_normalized(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
integral = numpy.trapz(y_fft, s)
return s, y_fft / integral
@classmethod
def fft(cls, f, n_steps, dL):
return cls._fft_normalized(cls._real_absolute_fourier(f), n_steps, dL)
from scipy.integrate import simps
class FourierTransformFull(FourierTransform):
@classmethod
def _full_fourier(cls, y):
return numpy.fft.fftshift(numpy.fft.fft(y))
@classmethod
def _fft_shifted(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
y_fft -= y_fft[0]
return s, y_fft
@classmethod
def _fft_real(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.real(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _fft_imag(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.imag(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _normalize(cls, s, i):
return s, i/simps(i, s)
@classmethod
def fft(cls, f, n_steps, dL):
sr, fft_real = cls._fft_real(numpy.real(f), n_steps, dL)
si, fft_imag = cls._fft_imag(numpy.imag(f), n_steps, dL)
return cls._normalize(sr, fft_real - fft_imag)
#################################################
# CALCULATION OF A SINGLE PEAK
#################################################
def create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=None):
fft_type = fit_global_parameters.fit_initialization.fft_parameters.fft_type
fit_space_parameters = fit_global_parameters.space_parameters()
reflection = line_profile.get_reflection(phase_index, reflection_index)
wavelength = incident_radiation.wavelength.value
lattice_parameter = phase.a.value
fourier_amplitudes = None
# INSTRUMENTAL PROFILE ---------------------------------------------------------------------------------------------
instrumental_profile_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(Caglioti.__name__, diffraction_pattern_index)
if not instrumental_profile_parameters is None:
if fourier_amplitudes is None:
fourier_amplitudes = instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
else:
fourier_amplitudes *= instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
# SIZE -------------------------------------------------------------------------------------------------------------
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.LOGNORMAL:
if size_parameters.shape == Shape.SPHERE:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.shape == Shape.WULFF:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
else:
fourier_amplitudes *=size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
elif size_parameters.distribution == Distribution.GAMMA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.distribution == Distribution.DELTA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
# STRAIN -----------------------------------------------------------------------------------------------------------
strain_parameters = fit_global_parameters.get_strain_parameters(phase_index)
if not strain_parameters is None and strain_parameters.active:
if isinstance(strain_parameters, InvariantPAH): # INVARIANT PAH
if fourier_amplitudes is None:
fourier_amplitudes = strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
else:
fourier_amplitudes *= strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
elif isinstance(strain_parameters, KrivoglazWilkensModel): # KRIVOGLAZ-WILKENS
if fourier_amplitudes is None:
fourier_amplitudes = strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
else:
fourier_amplitudes *= strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
elif isinstance(strain_parameters, WarrenModel): # WARREN
fourier_amplitudes_re, fourier_amplitudes_im = strain_warren_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.average_cell_parameter.value)
if fft_type == FFTTypes.FULL:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re + 1j*fourier_amplitudes_im
else:
fourier_amplitudes = (fourier_amplitudes*fourier_amplitudes_re) + 1j*(fourier_amplitudes*fourier_amplitudes_im)
elif fft_type == FFTTypes.REAL_ONLY:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re
else:
fourier_amplitudes *= fourier_amplitudes_re
# FFT -----------------------------------------------------------------------------------------------------------
if not fourier_amplitudes is None:
s, I = FourierTranformFactory.get_fourier_transform(fft_type).fft(fourier_amplitudes,
n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
else:
s, I = FourierTransform.get_empty_fft(n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
s_hkl = Utilities.s_hkl(lattice_parameter, reflection.h, reflection.k, reflection.l)
s += s_hkl
# INTENSITY MODULATION: STRUCTURAL MODEL YES/NO --------------------------------------------------------------------
if phase.use_structure:
if isinstance(phase, GSASIIPhase):
I *= phase.intensity_scale_factor.value * gsasii_intensity_factor(reflection.h,
reflection.k,
reflection.l,
gsas_reflections_list)
else:
I *= phase.intensity_scale_factor.value * \
multiplicity_cubic(reflection.h, reflection.k, reflection.l) * \
squared_modulus_structure_factor(s_hkl,
phase.formula,
reflection.h,
reflection.k,
reflection.l,
phase.symmetry)
else:
I *= reflection.intensity.value
    # TODO: add TDS handling with dedicated data structures + an ad hoc widget
# PEAK SHIFTS -----------------------------------------------------------------------------------------------------
if not fit_global_parameters.shift_parameters is None:
theta = Utilities.theta(s, wavelength)
for key in fit_global_parameters.shift_parameters.keys():
shift_parameters = fit_global_parameters.get_shift_parameters_item(key, diffraction_pattern_index)
if not shift_parameters is None:
if key == Lab6TanCorrection.__name__:
s += lab6_tan_correction(theta, wavelength,
shift_parameters.ax.value,
shift_parameters.bx.value,
shift_parameters.cx.value,
shift_parameters.dx.value,
shift_parameters.ex.value)
elif key == ZeroError.__name__:
s += Utilities.s(shift_parameters.shift.value/2, wavelength)
elif key == SpecimenDisplacement.__name__:
s += specimen_displacement(theta, wavelength, shift_parameters.goniometer_radius, shift_parameters.displacement.value*1e-6) # to m
# LORENTZ FACTOR --------------------------------------------------------------------------------------
if not fit_global_parameters.instrumental_profile_parameters is None:
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_lorentz_factor:
if polarization_parameters.lorentz_formula == LorentzFormula.Shkl_Shkl:
I *= lorentz_factor_simplified_normalized(s_hkl, wavelength)
elif polarization_parameters.lorentz_formula == LorentzFormula.S_Shkl:
I *= lorentz_factor_normalized(s, s_hkl, wavelength)
return s, I
######################################################################
# WPPM FUNCTIONS
######################################################################
import numpy
from scipy.special import erfc
import os
# performance improvement
######################################################################
# THERMAL AND POLARIZATION
######################################################################
def debye_waller(s, B):
return numpy.exp(-0.5*B*(s**2)) # it's the exp(-2M) = exp(-Bs^2/2)
def lorentz_factor(s, s_hkl):
return 1/(s*s_hkl)
def lorentz_factor_normalized(s, s_hkl, wavelength):
return lorentz_factor(s, s_hkl)/numpy.sqrt(1 - (s*wavelength/2)**2)
def lorentz_factor_simplified(s_hkl):
return 1/(s_hkl**2)
def lorentz_factor_simplified_normalized(s_hkl, wavelength):
return lorentz_factor_simplified(s_hkl)/numpy.sqrt(1 - (s_hkl*wavelength/2)**2)
def polarization_factor(twotheta, twotheta_mono, degree_of_polarization, beampath):
Q = degree_of_polarization
if twotheta_mono is None or twotheta_mono == 0.0:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta)**2))/2
else:
if beampath == Beampath.PRIMARY:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta_mono)**2)*(numpy.cos(twotheta)**2))/(1 + (numpy.cos(twotheta_mono)**2))
elif beampath == Beampath.SECONDARY:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta_mono)**2)*(numpy.cos(twotheta)**2))/2
######################################################################
# SIZE
######################################################################
from scipy.special import gamma as G, gammaincc as GU
def size_function_delta(L, D):
LfracD = L/D
return 1 - 1.5*LfracD + 0.5*LfracD**3
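# This is the common-volume function of a sphere of diameter D (monodisperse
# case): size_function_delta(0, D) == 1 and size_function_delta(D, D) == 0.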
def size_function_lognormal(L, sigma, mu):
modL = numpy.abs(L)
lnModL = numpy.log(modL)
sqrt2 = numpy.sqrt(2)
size = 0.5*erfc((lnModL - mu -3*sigma**2)/(sigma*sqrt2)) + \
-0.75*modL*erfc((lnModL - mu -2*sigma**2)/(sigma*sqrt2))*numpy.exp(-mu - 2.5*sigma**2) + \
0.25*(L**3)*erfc((lnModL - mu)/(sigma*sqrt2)) *numpy.exp(-3*mu - 4.5*sigma**2)
return size
def size_function_gamma(L, g, mu):
Lgm = L*g/mu
size = ((0.5*(Lgm**3)*GU(g, Lgm)) - \
(1.5*Lgm*GU(g+2, Lgm)) + \
GU(g+3, Lgm)) / G(g+3)
return size
def lognormal_distribution(mu, sigma, x):
return numpy.exp(-0.5*((numpy.log(x)-mu)/(sigma))**2)/(x*sigma*numpy.sqrt(2*numpy.pi))
def delta_distribution(mu, x):
distribution = numpy.zeros(len(x))
distribution[numpy.where(x==mu)] = 1.0
return distribution
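# Caution: this relies on exact float equality (x == mu), so the distribution is
# all zeros unless mu coincides exactly with one of the grid points in x.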
def gamma_distribution(mu, g, x):
gxm = g*x/mu
    return (g/(mu*G(g)))*(gxm**(g-1))*numpy.exp(-gxm)
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from SlidingWindowVideoTDA.VideoTools import *
from Alignment.AllTechniques import *
from Alignment.AlignmentTools import *
from Alignment.Alignments import *
from Alignment.DTWGPU import *
from Alignment.ctw.CTWLib import *
from PaperFigures import makeColorbar
WEIPATH = "Alignment/ctw/data/wei/"
WEIVIDEOS = ["daria_walk", "denis_walk", "eli_walk", "ira_walk"]
WEICROP = {'daria_walk':[10, 61], 'denis_walk':[8, 61], 'eli_walk':[3, 61], 'ido_walk':[0, 44], 'ira_walk':[0, 61]}
def getWeiNamesFromStruct(dtype):
"""
Purpose: For some reason the Weizmann struct got unrolled so the dtype
holds the action names. Given the dtype, split it up into a list of
strings
"""
s = "%s"%dtype
s = s.replace(", ", "")
names = [(n.replace("('", "")).replace("'", "") for n in s.split("'O')")[0:-1]]
names[0] = names[0][1::]
return names
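# Example (illustrative): for a dtype whose repr is
# "[('daria_walk', 'O'), ('denis_walk', 'O')]" this string surgery returns
# ['daria_walk', 'denis_walk'].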
def getWeiAlignedMask(action, doCrop = True):
X = sio.loadmat(WEIPATH + "/mask.mat")
X = X['aligned_masks'][0][0]
names = getWeiNamesFromStruct(X.dtype)
idx = 0
for idx in range(len(names)):
if names[idx] == action:
break
print("name = %s, idx = %i"%(action, idx))
I = X[idx]
I = np.rollaxis(I, 2, 0)
IDims = I.shape[1::]
    I = np.reshape(I, (I.shape[0], IDims[0]*IDims[1]))
from typing import List
import numba as nb
import numpy as np
from astropy.io import fits
__all__ = ["makeaperpixmaps", "distarr", "subdistarr", "apercentre", "aperpixmap"]
def makeaperpixmaps(npix: int, folderpath=None) -> None:
'''Writes the aperture binary masks out after calculation.
Parameters
----------
npix : int
Width of aperture image.
folderpath : Pathlib object
Path to the folder where the aperture masks should be saved.
Returns
-------
None
'''
cenpix = npix / 2. + 1.
r_aper = np.arange(cenpix) + 1.
numaper = len(r_aper)
for i in range(0, numaper):
aperturepixelmap = aperpixmap(npix, r_aper[i], 9, .1)
if folderpath:
fileout = folderpath / f"aperture{i}.fits"
else:
fileout = f"aperture{i}.fits"
fits.writeto(fileout, aperturepixelmap, overwrite=True)
@nb.njit
def aperpixmap(npix: int, rad: float, nsubpix: int, frac: float) -> np.ndarray:
'''Calculate aperture binary mask.
Calculates the aperture binary mask through pixel sampling knowing
the aperture radius and number of subpixels.
Near direct translation of IDL code.
Parameters
----------
npix : int
Width of aperture image.
rad : float
Radius of the aperture.
nsubpix : int
Number of subpixels
    frac : float
        Minimum fraction of a pixel's subpixels that must fall inside the
        aperture for that pixel to be included in the mask.
Returns
-------
np.ndarry
Numpy array that stores the mask.
'''
npix = int(npix)
cenpix = np.array([int(npix/2) + 1, int(npix/2) + 1])
mask = np.zeros((npix, npix))
submasksize = (npix*nsubpix, npix*nsubpix)
# create subdistance array
subdist = subdistarr(npix, nsubpix, cenpix)
xcoord = 0
ycoord = 0
# subpixel coordinates
x_min = 0
y_min = 0
x_max = nsubpix - 1
y_max = nsubpix - 1
inds = np.arange(0, npix*npix)
subpixels = np.zeros((npix*npix, nsubpix, nsubpix))
i = 0
for i in range(0, (npix*npix)):
subpixels[i, :, :] = subdist[x_min:x_max+1, y_min:y_max+1]
xcoord += 1
x_min += nsubpix
x_max += nsubpix
if y_max > submasksize[1]:
break
if x_max > submasksize[0]:
xcoord = 0
ycoord += 1
x_min = 0
x_max = nsubpix - 1
y_min += nsubpix
y_max += nsubpix
for i in range(0, (npix*npix)):
# TODO. This is really inefficient...
apersubpix = (subpixels[i, :, :].flatten()[::-1] <= rad).nonzero()[0]
apersubpix_size = apersubpix.shape
fraction = float(apersubpix_size[0]) / float(nsubpix**2)
if fraction >= frac:
x = int(inds[i] % npix)
y = int(inds[i] // npix)
mask[x, y] = 1
return mask
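# Hypothetical usage: binary mask of a radius-10 aperture on a 64x64 grid,
# sampling each pixel on a 9x9 subgrid with a 10% coverage threshold:
#   mask = aperpixmap(64, 10.0, 9, 0.1)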
@nb.njit(nb.float64[:, :](nb.int64, nb.int64, nb.int64[:]))
def distarr(npixx: int, npixy: int, cenpix: np.ndarray) -> np.ndarray:
'''Creates an array of distances from given centre pixel.
Near direct translation of IDL code.
Parameters
----------
npixx : int
Number of x pixels in the aperture mask.
npixy : int
Number of y pixels in the aperture mask.
cenpix : np.ndarray
Location of central pixels.
Returns
-------
np.ndarray
array of distances.
'''
    y1 = np.arange(npixy)
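    # The source is truncated here; based on the docstring, a plausible
    # completion (assumed, not original) would shift x/y index grids by cenpix
    # and return np.sqrt(xx**2 + yy**2) as the (npixx, npixy) distance array.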
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from collections import OrderedDict
from datetime import datetime, timezone
from pathlib import Path
import numpy as np
from ...utils import fill_doc, logger, verbose, warn, _check_fname
from ..base import BaseRaw
from ..meas_info import create_info
from ...annotations import Annotations
from ..utils import _mult_cal_one
def _ensure_path(fname):
out = fname
if not isinstance(out, Path):
out = Path(out)
return out
@fill_doc
def read_raw_nihon(fname, preload=False, verbose=None):
"""Reader for an Nihon Kohden EEG file.
Parameters
----------
fname : str
Path to the Nihon Kohden data file (``.EEG``).
preload : bool
If True, all data are loaded at initialization.
%(verbose)s
Returns
-------
raw : instance of RawNihon
A Raw object containing Nihon Kohden data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNihon(fname, preload, verbose)
_valid_headers = [
'EEG-1100A V01.00',
'EEG-1100B V01.00',
'EEG-1100C V01.00',
'QI-403A V01.00',
'QI-403A V02.00',
'EEG-2100 V01.00',
'EEG-2100 V02.00',
'DAE-2100D V01.30',
'DAE-2100D V02.00',
# 'EEG-1200A V01.00', # Not working for the moment.
]
def _read_nihon_metadata(fname):
metadata = {}
fname = _ensure_path(fname)
pnt_fname = fname.with_suffix('.PNT')
if not pnt_fname.exists():
warn('No PNT file exists. Metadata will be blank')
return metadata
logger.info('Found PNT file, reading metadata.')
with open(pnt_fname, 'r') as fid:
        version = np.fromfile(fid, '|S16', 1)
from junctiontree import computation as comp
import numpy as np
from .util import assert_potentials_equal
def get_arrays_and_vars(tree, node_list, potentials):
"""Get all arrays and their variables as a flat list
Output: [array1, vars1, ..., arrayN, varsN]
"""
return list([potentials[tree[0]],node_list[tree[0]]]) + sum(
[
get_arrays_and_vars(child_tree, node_list, potentials)
for child_tree in tree[1:]
],
[]
)
def brute_force_sum_product(tree, node_list, potentials):
"""Compute brute force sum-product with einsum """
# Function to compute the sum-product with brute force einsum
arrays_vars = get_arrays_and_vars(tree, node_list, potentials)
f = lambda output_vars: np.einsum(*(arrays_vars + [output_vars]))
def __run(tree, node_list, p, f, res=[]):
res.append(f(node_list[tree[0]]))
for child_tree in tree[1:]:
__run(child_tree, node_list, p, f, res)
return res
return __run(tree, node_list, potentials, f)
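# Example: for the tree [0, (2, [1])] with variables [[3, 5], [5, 3], [5, 3]],
# the call f([3, 5]) evaluates np.einsum(a0, [3, 5], a2, [5, 3], a1, [5, 3], [3, 5]),
# i.e. the full joint contracted down to node 0's variables.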
def assert_sum_product(tree, node_order, potentials, variables):
""" Test shafer-shenoy vs brute force sum-product """
# node_order represents the order nodes are traversed
# in get_arrays_and_vars function
assert_potentials_equal(
brute_force_sum_product(
tree,
[variables[idx] for idx in node_order],
[potentials[idx] for idx in node_order]
),
comp.compute_beliefs(tree, potentials, variables)
)
def test_one_scalar_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(),
],
[[]] # no variables for scalar
)
def test_one_matrix_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(2, 3),
],
[
[3,5]
]
)
def test_one_child_node_with_all_variables_shared():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 2),
np.ones((3, 2)),
],
[
[3,5],
[5,3],
[5,3]
]
)
def test_one_child_node_with_one_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.ones((3,)),
],
[
[3,5],
[5,9],
[5]
]
)
def test_one_child_node_with_no_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
            np.random.randn(2),
import numpy as np
from numpy import matlib
from random import randrange
from utils.planing_utils import GetUniformPolicy, generalized_greedy, PolicyIteration_GivenRP, draw_policy_at_random
# ------------------------------------------------------------------------------------------------------------~
def run_learning_method(args_r, M, n_traj, gamma_guidance, l2_factor, l1_factor):
if args_r.method in {'Expected_SARSA','SARSA', 'LSTDQ', 'ELSTDQ', 'LSTDQ_nested', 'ELSTDQ_nested'}:
pi_t = model_free_approx_policy_iteration(args_r, M, n_traj, gamma_guidance, l2_factor, l1_factor)
elif args_r.method == 'Model_Based':
pi_t = model_based_learning(args_r, M, n_traj, gamma_guidance, l2_factor, l1_factor)
else:
raise AssertionError('unrecognized method')
return pi_t
# -------------------------------------------------------------------------------------------
def model_based_learning(args, M, n_traj, gamma_guidance, l2_factor=None, l1_factor=None):
if l2_factor is not None or l1_factor is not None:
raise AssertionError('Not supported')
nS = args.nS
nA = args.nA
epsilon = args.epsilon
# Initial behaviour policy - this is the policy used for collecting data
if 'initial_policy' not in args or args.initial_policy == 'uniform':
pi_b = GetUniformPolicy(nS, nA)
elif args.initial_policy == 'generated_random':
pi_b = draw_policy_at_random(nS, nA)
else:
raise AssertionError('Unrecognized args.initial_policy')
pi_t = pi_b.copy() # Initial target policy - the is the policy maintained by policy iteration
data_history = [] # data from all previous episodes
# Run episodes
for i_episode in range(args.n_episodes):
# Generate data:
data = M.SampleData(args, pi_b, n_traj, p0=None)
data_history += data
# Improve policy:
# 1. Estimate model:
P_est, R_est = ModelEstimation(data_history, nS, nA)
# 2. Certainty-Equivalence policy w.r.t model-estimation and gamma_guidance:
pi_t, _, _ = PolicyIteration_GivenRP(R_est, P_est, gamma_guidance, args)
pi_b = (1 - epsilon) * pi_t + (epsilon / nA)
# end for i_episode
return pi_t
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
def model_free_approx_policy_iteration(args, M, n_traj, gamma_guidance, l2_factor=None, l1_factor=None):
nS = args.nS
nA = args.nA
data_history = [] # data from all previous episodes
# Initial behaviour policy - this is the policy used for collecting data
if 'initial_policy' not in args or args.initial_policy == 'uniform':
pi_b = GetUniformPolicy(nS, nA)
elif args.initial_policy == 'generated_random':
pi_b = draw_policy_at_random(nS, nA)
else:
raise AssertionError('Unrecognized args.initial_policy')
pi_t = pi_b.copy() # Initial target policy - the is the policy maintained by policy iteration
Q_est = None # the Q-function will be initialized in the first evaluation step
# Run episodes:
for i_episode in range(args.n_episodes):
# Generate data:
data = M.SampleData(args, pi_b, n_traj, p0=None)
data_history += data
# Improve value estimation:
if args.method in {'Expected_SARSA'}:
# since this is off-policy evaluation - use all data
Q_est = run_expected_sarsa(data_history, pi_t, gamma_guidance, args,
initial_Q=Q_est, l2_factor=l2_factor, l1_factor=l1_factor)
elif args.method in {'LSTDQ', 'ELSTDQ'}:
# since this is off-policy evaluation - use all data
Q_est = LSTDQ(data_history, pi_t, gamma_guidance, args, l2_factor=l2_factor, l1_factor=l1_factor)
elif args.method in {'LSTDQ_nested', 'ELSTDQ_nested'}:
# since this is off-policy evaluation - use all data
Q_est = LSTDQ_nested(data_history, pi_t, gamma_guidance, args, l2_factor=l2_factor, l1_factor=l1_factor)
elif args.method in {'SARSA'}:
# since this is on-policy evaluation - use data only from current policy
Q_est = run_sarsa(data, nS, nA, gamma_guidance, args, initial_Q=Q_est, l2_factor=l2_factor, l1_factor=l1_factor)
else:
raise AssertionError('unrecognized method')
# Improve policy:
pi_t = generalized_greedy(Q_est)
# behaviour policy : For exploration set an epsilon-greedy policy:
epsilon = args.epsilon
pi_b = (1 - epsilon) * pi_t + (epsilon / nA)
return pi_t
# ------------------------------------------------------------------------------------------------------------~
# value estimation:
def run_value_estimation_method(data, M, args, gamma_guidance, l2_proj, l2_fp, l2_TD):
gammaEval = args.gammaEval
nS = M.nS
V_true = np.linalg.solve((np.eye(nS) - gammaEval * M.P), M.R)
if args.alg_type == 'LSTD':
V_est = LSTD(data, gamma_guidance, args, l2_factor=l2_proj)
elif args.alg_type == 'LSTD_Nested':
V_est = LSTD_Nested(data, gamma_guidance, args, l2_proj, l2_fp)
elif args.alg_type == 'LSTD_Nested_Standard':
V_est = LSTD_Nested_Standard(data, gamma_guidance, args, l2_proj, l2_fp)
elif args.alg_type == 'batch_TD_value_evaluation':
V_est = batch_TD_value_evaluation(data, gamma_guidance, args, l2_factor=l2_TD)
elif args.alg_type == 'model_based_pol_eval':
V_est = model_based_pol_eval(data, gamma_guidance, args)
elif args.alg_type == 'model_based_known_P':
V_est = model_based_known_P(data, gamma_guidance, args, M)
else:
raise AssertionError('Unrecognized args.grid_type')
return V_est, V_true
# ------------------------------------------------------------------------------------------------------------~
def set_learning_rate(i_iter, args, gamma_guidance):
learning_rate_def = args.learning_rate_def
lr_type = learning_rate_def['type']
if lr_type == 'const':
alpha = learning_rate_def['alpha']
elif lr_type == 'a/(b+i_iter)':
a = learning_rate_def['a']
b = learning_rate_def['b']
alpha = a / (b + i_iter)
    elif lr_type == 'a/(b+sqrt(i_iter))':
a = learning_rate_def['a']
b = learning_rate_def['b']
alpha = a / (b + np.sqrt(i_iter))
else:
raise AssertionError('Invalid learning_rate_def')
if learning_rate_def['scale']:
# explanation to scaling: according to equivalency proposition, this scaling would make discount reg (using gamma) to behave like using gammaEval but with added reg term.
gammaEval = args.gammaEval
alpha *= gammaEval / gamma_guidance
return alpha
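# Example (hypothetical) learning_rate_def for a 1/t schedule:
#   {'type': 'a/(b+i_iter)', 'a': 1.0, 'b': 100.0, 'scale': True}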
# ------------------------------------------------------------------------------------------------------------~
def set_initial_value(args, shape, gamma_guidance):
TD_Init_type = args.TD_Init_type
gammaEval = args.gammaEval
Rmax = 1 # assumption in mdp creation
Vmax = Rmax / (1-gammaEval)
# explanation to scaling: according to equivalency proposition, this scaling would make discount reg (using gamma) to behave like using gammaEval but with added reg term.
VmaxGamma = Rmax / (1-gamma_guidance)
if isinstance(shape, int):
shape = [shape]
if TD_Init_type == 'random_0_1':
x = np.random.rand(*shape)
elif TD_Init_type == 'random_0_Vmax':
x = np.random.rand(*shape) * Vmax
elif TD_Init_type == 'zero':
x = np.zeros(shape)
elif TD_Init_type == 'Vmax':
x = np.ones(shape) * Vmax
elif TD_Init_type == '0.5_Vmax':
x = 0.5 * np.ones(shape) / (1 - gammaEval)
elif TD_Init_type == 'VmaxGamma':
x = np.ones(shape) * VmaxGamma
else:
raise AssertionError('Invalid TD_Init_type')
return x
# ------------------------------------------------------------------------------------------------------------~
def ModelEstimation(data, nS, nA):
"""
    Maximum-likelihood estimation of the model based on the data
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    nS: number of states
    nA: number of actions
    Returns:
    P_est: [S x A x S] estimated transition probability matrix P_{s,a,s'}=P(s'|s,a)
    R_est: [S x A] estimated mean reward matrix R
"""
counts_sas = np.zeros((nS, nA, nS))
counts_sa = np.zeros((nS, nA))
R_est = np.zeros((nS, nA))
P_est = np.zeros((nS, nA, nS))
for traj in data:
for sample in traj:
(s, a, r, s_next, a_next) = sample
counts_sa[s, a] += 1
counts_sas[s, a, s_next] += 1
R_est[s, a] += r
for s in range(nS):
for a in range(nA):
if counts_sa[s, a] == 0:
# if this state-action doesn't exist in data
# Use default values:
R_est[s, a] = 0.5
P_est[s, a, :] = 1 / nS
else:
R_est[s, a] /= counts_sa[s, a]
P_est[s, a, :] = counts_sas[s, a, :] / counts_sa[s, a]
if np.any(np.abs(P_est.sum(axis=2) - 1) > 1e-5):
raise RuntimeError('Transition Probability matrix not normalized!!')
return P_est, R_est
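# Minimal usage sketch (hypothetical data): one trajectory in a 2-state, 1-action
# MDP. The observed transition (s=0, a=0) -> s'=1 receives all the probability
# mass, while the unvisited pair (1, 0) falls back to the default values:
#   >>> demo_data = [[(0, 0, 1.0, 1, 0)]]
#   >>> P_est, R_est = ModelEstimation(demo_data, nS=2, nA=1)
#   >>> P_est[0, 0]    # -> array([0., 1.])
#   >>> R_est[1, 0]    # -> 0.5 (default for an unseen state-action pair)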
# ------------------------------------------------------------------------------------------------------------~
def TD_value_evaluation(data, nS, nA, gamma, args):
"""
    Runs TD iterations on a data set of samples from an unknown policy to estimate the value function of this policy
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    nS: number of states
    nA: number of actions
    gamma: Discount factor
    Returns:
    V_est: [S] The estimated value function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state
"""
gammaEval = args.gammaEval
# Initialization:
V_est = set_initial_value(args, nS, gamma)
# prev_V = V_pi.copy()
# stop_diff = 1e-5 # stopping condition
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
n_samples = len(data_tuples)
for i_iter in range(args.n_TD_iter):
alpha = set_learning_rate(i_iter, args, gamma)
# Choose random sample:
i_sample = randrange(n_samples)
(s, a, r, s_next, a_next) = data_tuples[i_sample]
if args.use_reward_scaling:
r *= gamma / gammaEval
delta = r + gamma * V_est[s_next] - V_est[s]
V_est[s] += alpha * delta
# end for i_iter
# if i_iter > 0 and (i_iter % 10000 == 0):
# if np.linalg.norm(V_pi - prev_V) < stop_diff:
# break
# prev_V = V_pi
return V_est
# ------------------------------------------------------------------------------------------------------------~
def run_sarsa(data, nS, nA, gamma, args, initial_Q=None, l2_factor=None, l1_factor=None):
"""
    Runs TD iterations on a data set of samples from an unknown policy to estimate the Q-function of this policy
    using the SARSA algorithm
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    nS: number of states
    nA: number of actions
    gamma: Discount factor
    Returns:
    Q_est: [S x A] The estimated Q-function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state and action
"""
gammaEval = args.gammaEval
# Initialization:
if initial_Q is None:
Q_est = set_initial_value(args, (nS, nA), gamma)
else:
Q_est = initial_Q
# prev_V = V_pi.copy()
# stop_diff = 1e-5 # stopping condition
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
n_samples = len(data_tuples)
for i_iter in range(args.n_TD_iter):
alpha = set_learning_rate(i_iter, args, gamma)
# Choose random sample:
i_sample = randrange(n_samples)
(s, a, r, s_next, a_next) = data_tuples[i_sample]
if args.use_reward_scaling:
r *= gamma / gammaEval
delta = r + gamma * Q_est[s_next, a_next] - Q_est[s, a]
Q_est[s, a] += alpha * delta
# Add the gradient of the added regularization term:
if l2_factor is not None:
reg_grad = 2 * l2_factor * Q_est # gradient of the L2 regularizer [tabular case]
Q_est -= alpha * reg_grad
if l1_factor is not None:
            reg_grad = l1_factor * np.sign(Q_est)  # subgradient of the L1 regularizer [tabular case]
Q_est -= alpha * reg_grad
# end for i_iter
return Q_est
# ------------------------------------------------------------------------------------------------------------~
def run_expected_sarsa(data, pi, gamma, args, initial_Q=None, l2_factor=None, l1_factor=None):
"""
    Runs TD iterations on a data set of samples from a given policy to estimate the Q-function of this policy
    using the Expected-SARSA algorithm
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    pi: [nS x nA] the policy that generated the data
    gamma: Discount factor
    Returns:
    Q_est: [S x A] The estimated Q-function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state and action
"""
if pi.ndim != 2:
raise AssertionError('Invalid input')
S = pi.shape[0]
A = pi.shape[1]
gammaEval = args.gammaEval
# Initialization:
if initial_Q is None:
Q_est = set_initial_value(args, (S, A), gamma)
else:
Q_est = initial_Q
# prev_V = V_pi.copy()
# stop_diff = 1e-5 # stopping condition
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
n_samples = len(data_tuples)
for i_iter in range(args.n_TD_iter):
alpha = set_learning_rate(i_iter, args, gamma)
# Choose random sample:
i_sample = randrange(n_samples)
(s, a, r, s_next, a_next) = data_tuples[i_sample]
if args.use_reward_scaling:
r *= gamma / gammaEval
V_next = np.dot(Q_est[s_next, :], pi[s_next, :])
delta = r + gamma * V_next - Q_est[s, a]
Q_est[s, a] += alpha * delta
# Add the gradient of the added regularization term:
if l2_factor is not None:
reg_grad = 2 * l2_factor * Q_est # gradient of the L2 regularizer [tabular case]
Q_est -= alpha * reg_grad
if l1_factor is not None:
            reg_grad = l1_factor * np.sign(Q_est)  # subgradient of the L1 regularizer [tabular case]
Q_est -= alpha * reg_grad
# end for i_iter
return Q_est
# ------------------------------------------------------------------------------------------------------------~
def LSTDQ(data, pi, gamma, args, l2_factor=None, l1_factor=None):
"""
    Uses data from a given policy to estimate the Q-function of this policy
    using the LSTDQ algorithm
    "Least-squares policy iteration, Lagoudakis, Parr - Journal of machine learning research, 2003"
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    pi: [nS x nA] the policy that generated the data
    gamma: Discount factor
    Returns:
    Q_est: [S x A] The estimated Q-function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state and action
"""
if l1_factor is not None:
raise AssertionError('Not supported yet')
if l2_factor is None:
l2_factor = args.default_l2_factor # to prevent Singular matrix
if pi.ndim != 2:
raise AssertionError('Invalid input')
nS = pi.shape[0]
nA = pi.shape[1]
gammaEval = args.gammaEval
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
n_samples = len(data_tuples)
n_feat = nS * nA
Amat = np.zeros((n_feat, n_feat))
bmat = np.zeros((n_feat, 1))
for i_samp in range(n_samples):
(s, a, r, s_next, a_next) = data_tuples[i_samp]
        if args.use_reward_scaling:
            raise AssertionError('use_reward_scaling should be False with LSTDQ')
ind1 = s_a_to_ind(s, a, nS, nA)
Amat[ind1, ind1] += 1
bmat[ind1] += r
if args.method == 'LSTDQ':
# SARSA style update
ind2 = s_a_to_ind(s_next, a_next, nS, nA)
Amat[ind1, ind2] -= gamma
elif args.method == 'ELSTDQ':
# Expected SARSA style update
for a_prime in range(nA):
ind2 = s_a_to_ind(s_next, a_prime, nS, nA)
                Amat[ind1, ind2] -= gamma * pi[s_next, a_prime]
else:
raise AssertionError
Qest_vec = np.linalg.solve(Amat + l2_factor * np.eye(n_feat), bmat)
Q_est = np.reshape(Qest_vec, (nS, nA))
return Q_est
# ------------------------------------------------------------------------------------------------------------~
def s_a_to_ind(s, a, nS, nA):
ind = s * nA + a
return ind
# ------------------------------------------------------------------------------------------------------------~
def ind_to_s_a(ind, nS, nA):
s = ind // nA
a = ind % nA
return s, a
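# Example of the (assumed row-major) flattening convention used above:
# with nS=3, nA=2, the pair (s=2, a=1) maps to ind = 2 * 2 + 1 = 5, and
# ind_to_s_a(5, nS=3, nA=2) recovers (2, 1).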
# ------------------------------------------------------------------------------------------------------------~
def LSTDQ_nested(data, pi, gamma, args, l2_factor=None, l1_factor=None):
"""
    Uses data from a given policy to estimate the Q-function of this policy
    using the LSTDQ algorithm
    "Least-squares policy iteration, Lagoudakis, Parr - Journal of machine learning research, 2003"
    based on:
    "Regularized least-squares temporal difference learning with nested ℓ2 and ℓ1 penalization", Hoffman, Lazaric, Ghavamzadeh, Munos - European Workshop on Reinforcement Learning, 2011
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    pi: [nS x nA] the policy that generated the data
    gamma: Discount factor
    Returns:
    Q_est: [S x A] The estimated Q-function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state and action
"""
if l1_factor is not None:
raise AssertionError('Not supported yet')
if l2_factor is None:
l2_factor = args.default_l2_factor # to prevent Singular matrix
if pi.ndim != 2:
raise AssertionError('Invalid input')
nS = pi.shape[0]
nA = pi.shape[1]
gammaEval = args.gammaEval
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
n_samples = len(data_tuples)
n_feat = nS * nA + 1 # +1 for bias
Phi = np.zeros((n_samples, n_feat)) # For each sample: feature of current state
PhiPrime = np.zeros((n_samples, n_feat)) # For each sample: feature of next state
R = np.zeros((n_samples, 1)) # For each sample: reward
for i_samp in range(n_samples):
(s, a, r, s_next, a_next) = data_tuples[i_samp]
if args.use_reward_scaling:
r *= gamma / gammaEval
ind1 = s_a_to_ind(s, a, nS, nA)
Phi[i_samp, ind1] = 1.
Phi[i_samp, -1] = 1. # for bias
PhiPrime[i_samp, -1] = 1. # for bias
R[i_samp] = r
if args.method == 'LSTDQ_nested':
# SARSA style update
ind2 = s_a_to_ind(s_next, a_next, nS, nA)
PhiPrime[i_samp, ind2] = 1.
elif args.method == 'ELSTDQ_nested':
# Expected SARSA style update
for a_prime in range(nA):
ind2 = s_a_to_ind(s_next, a_prime, nS, nA)
                PhiPrime[i_samp, ind2] = pi[s_next, a_prime]
else:
raise AssertionError
l2_proj = l2_fp = l2_factor
PhiBar = Phi.mean(axis=0) # features means
PhiPrimeBar = PhiPrime.mean(axis=0) # features means
RBar = R.mean(axis=0)
PhiTilde = Phi - PhiBar
PhiPrimeTilde = PhiPrime - PhiPrimeBar
Rtilde = R - RBar
sigmaPhi = PhiTilde.std(axis=0)
sigmaPhi[sigmaPhi == 0] = 1.
PhiHat = PhiTilde / sigmaPhi
SigmaMat = PhiHat @ np.linalg.inv(PhiHat.T @ PhiHat + l2_proj * np.eye(n_feat)) @ PhiHat.T
Xmat = Phi - gamma * SigmaMat @ PhiPrimeTilde - gamma * matlib.repmat(PhiPrimeBar, n_samples, 1)
yMat = SigmaMat @ Rtilde + matlib.repmat(RBar, n_samples, 1)
Amat = Xmat.T @ Xmat + l2_fp * np.eye(n_feat)
bmat = Xmat.T @ yMat
theta_vec = np.linalg.solve(Amat, bmat)
Qest_vec = theta_vec[:-1] + theta_vec[-1] # add bias term ... Q(s,a) = (phi.T @ theta)_{s,a} = theta[s,a] + theta[-1]
Q_est = np.reshape(Qest_vec, (nS, nA))
return Q_est
# ------------------------------------------------------------------------------------------------------------~
def LSTD(data, gamma, args, l2_factor):
"""
    Uses data from a given policy to estimate the value function of this policy
    using the LSTD algorithm
    Parameters:
    data: list of n trajectories, each a sequence of tuples (state, action, reward, next state, next action)
    gamma: Discount factor
    Returns:
    V_est: [nS] The estimated value function of the fixed policy pi, i.e. the expected discounted return when following pi starting from some state
"""
# Join list of data tuples from all trajectories:
data_tuples = sum(data, [])
nS = args.nS
gammaEval = args.gammaEval
n_samples = len(data_tuples)
n_feat = nS
Amat = np.zeros((n_feat, n_feat))
    bmat = np.zeros((n_feat, 1))
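    # NOTE: the original function is truncated at this point; the remainder
    # below is a hedged reconstruction by analogy with LSTDQ above (tabular
    # one-hot state features), not necessarily the author's implementation.
    for i_samp in range(n_samples):
        (s, a, r, s_next, a_next) = data_tuples[i_samp]
        if args.use_reward_scaling:
            r *= gamma / gammaEval
        Amat[s, s] += 1
        Amat[s, s_next] -= gamma
        bmat[s] += r
    V_est = np.linalg.solve(Amat + l2_factor * np.eye(n_feat), bmat).ravel()
    return V_est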
import builtins
from collections import OrderedDict
from distutils.version import LooseVersion
import base64
import os
from functools import wraps
import matplotlib
import numpy as np
from .base_plot_types import \
PlotMPL, ImagePlotMPL
from .plot_container import \
ImagePlotContainer, \
log_transform, linear_transform, get_log_minorticks, \
validate_plot, invalidate_plot
from yt.data_objects.profiles import \
create_profile
from yt.data_objects.static_output import \
Dataset
from yt.data_objects.data_containers import \
YTSelectionContainer
from yt.frontends.ytdata.data_structures import \
YTProfileDataset
from yt.utilities.exceptions import \
YTNotInsideNotebook
from yt.utilities.logger import ytLogger as mylog
from yt.funcs import \
ensure_list, \
get_image_suffix, \
matplotlib_style_context, \
iterable
MPL_VERSION = LooseVersion(matplotlib.__version__)
def get_canvas(name):
from . import _mpl_imports as mpl
suffix = get_image_suffix(name)
if suffix == '':
suffix = '.png'
if suffix == ".png":
canvas_cls = mpl.FigureCanvasAgg
elif suffix == ".pdf":
canvas_cls = mpl.FigureCanvasPdf
elif suffix in (".eps", ".ps"):
canvas_cls = mpl.FigureCanvasPS
else:
mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
canvas_cls = mpl.FigureCanvasAgg
return canvas_cls
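# For instance, get_canvas("profile.pdf") returns mpl.FigureCanvasPdf, while an
# unrecognized suffix such as ".tiff" logs a warning and falls back to Agg.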
def invalidate_profile(f):
@wraps(f)
def newfunc(*args, **kwargs):
rv = f(*args, **kwargs)
args[0]._profile_valid = False
return rv
return newfunc
class PlotContainerDict(OrderedDict):
def __missing__(self, key):
plot = PlotMPL((10, 8), [0.1, 0.1, 0.8, 0.8], None, None)
self[key] = plot
return self[key]
class FigureContainer(OrderedDict):
def __init__(self, plots):
self.plots = plots
super(FigureContainer, self).__init__()
def __missing__(self, key):
self[key] = self.plots[key].figure
return self[key]
def __iter__(self):
return iter(self.plots)
class AxesContainer(OrderedDict):
def __init__(self, plots):
self.plots = plots
self.ylim = {}
self.xlim = (None, None)
super(AxesContainer, self).__init__()
def __missing__(self, key):
self[key] = self.plots[key].axes
return self[key]
def __setitem__(self, key, value):
super(AxesContainer, self).__setitem__(key, value)
self.ylim[key] = (None, None)
def sanitize_label(label, nprofiles):
label = ensure_list(label)
if len(label) == 1:
label = label * nprofiles
if len(label) != nprofiles:
raise RuntimeError("Number of labels must match number of profiles")
for l in label:
if l is not None and not isinstance(l, str):
raise RuntimeError("All labels must be None or a string")
return label
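# E.g. sanitize_label("z=0.5", 3) broadcasts the single label to
# ["z=0.5", "z=0.5", "z=0.5"]; a mismatched list length raises RuntimeError.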
def data_object_or_all_data(data_source):
if isinstance(data_source, Dataset):
data_source = data_source.all_data()
if not isinstance(data_source, YTSelectionContainer):
raise RuntimeError("data_source must be a yt selection data object")
return data_source
class ProfilePlot:
r"""
Create a 1d profile plot from a data source or from a list
of profile objects.
Given a data object (all_data, region, sphere, etc.), an x field,
and a y field (or fields), this will create a one-dimensional profile
of the average (or total) value of the y field in bins of the x field.
This can be used to create profiles from given fields or to plot
multiple profiles created from
`yt.data_objects.profiles.create_profile`.
Parameters
----------
data_source : YTSelectionContainer Object
The data object to be profiled, such as all_data, region, or
sphere. If a dataset is passed in instead, an all_data data object
is generated internally from the dataset.
x_field : str
The binning field for the profile.
y_fields : str or list
The field or fields to be profiled.
weight_field : str
The weight field for calculating weighted averages. If None,
the profile values are the sum of the field values within the bin.
Otherwise, the values are a weighted average.
Default : "cell_mass".
n_bins : int
The number of bins in the profile.
Default: 64.
accumulation : bool
If True, the profile values for a bin N are the cumulative sum of
all the values from bin 0 to N.
Default: False.
fractional : If True the profile values are divided by the sum of all
the profile data such that the profile represents a probability
distribution function.
label : str or list of strings
If a string, the label to be put on the line plotted. If a list,
this should be a list of labels for each profile to be overplotted.
Default: None.
plot_spec : dict or list of dicts
A dictionary or list of dictionaries containing plot keyword
arguments. For example, dict(color="red", linestyle=":").
Default: None.
x_log : bool
If not None, whether the x_axis should be plotted with a logarithmic
scaling.
Default: None
y_log : dict
A dictionary containing field:boolean pairs, setting the logarithmic
property for that field. May be overridden after instantiation using
set_log.
Default: None
Examples
--------
This creates profiles of a single dataset.
>>> import yt
>>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
>>> ad = ds.all_data()
>>> plot = ProfilePlot(ad, "density", ["temperature", "velocity_x"],
... weight_field="cell_mass",
... plot_spec=dict(color='red', linestyle="--"))
>>> plot.save()
This creates profiles from a time series object.
>>> es = yt.simulation("AMRCosmology.enzo", "Enzo")
>>> es.get_time_series()
>>> profiles = []
>>> labels = []
>>> plot_specs = []
>>> for ds in es[-4:]:
... ad = ds.all_data()
... profiles.append(create_profile(ad, ["density"],
... fields=["temperature",
... "velocity_x"]))
... labels.append(ds.current_redshift)
... plot_specs.append(dict(linestyle="--", alpha=0.7))
>>>
>>> plot = ProfilePlot.from_profiles(profiles, labels=labels,
... plot_specs=plot_specs)
>>> plot.save()
Use set_line_property to change line properties of one or all profiles.
"""
x_log = None
y_log = None
x_title = None
y_title = None
_plot_valid = False
def __init__(self, data_source, x_field, y_fields,
weight_field="cell_mass", n_bins=64,
accumulation=False, fractional=False,
label=None, plot_spec=None,
x_log=None, y_log=None):
data_source = data_object_or_all_data(data_source)
if x_log is None:
logs = None
else:
logs = {x_field:x_log}
if isinstance(data_source.ds, YTProfileDataset):
profiles = [data_source.ds.profile]
else:
profiles = [create_profile(data_source, [x_field],
n_bins=[n_bins],
fields=ensure_list(y_fields),
weight_field=weight_field,
accumulation=accumulation,
fractional=fractional,
logs=logs)]
if plot_spec is None:
plot_spec = [dict() for p in profiles]
if not isinstance(plot_spec, list):
plot_spec = [plot_spec.copy() for p in profiles]
ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)
@validate_plot
def save(self, name=None, suffix=None, mpl_kwargs=None):
r"""
Saves a 1d profile plot.
Parameters
----------
name : str
The output file keyword.
suffix : string
Specify the image type by its suffix. If not specified, the output
type will be inferred from the filename. Defaults to PNG.
mpl_kwargs : dict
A dict of keyword arguments to be passed to matplotlib.
"""
if not self._plot_valid:
self._setup_plots()
unique = set(self.plots.values())
if len(unique) < len(self.plots):
iters = zip(range(len(unique)), sorted(unique))
else:
iters = self.plots.items()
if not suffix:
suffix = "png"
suffix = ".%s" % suffix
fullname = False
if name is None:
if len(self.profiles) == 1:
prefix = self.profiles[0].ds
else:
prefix = "Multi-data"
name = "%s%s" % (prefix, suffix)
else:
sfx = get_image_suffix(name)
if sfx != '':
suffix = sfx
prefix = name[:name.rfind(suffix)]
fullname = True
else:
prefix = name
xfn = self.profiles[0].x_field
if isinstance(xfn, tuple):
xfn = xfn[1]
fns = []
for uid, plot in iters:
if isinstance(uid, tuple):
uid = uid[1]
if fullname:
fns.append("%s%s" % (prefix, suffix))
else:
fns.append("%s_1d-Profile_%s_%s%s" % (prefix, xfn, uid, suffix))
mylog.info("Saving %s", fns[-1])
with matplotlib_style_context():
plot.save(fns[-1], mpl_kwargs=mpl_kwargs)
return fns
@validate_plot
def show(self):
r"""This will send any existing plots to the IPython notebook.
If yt is being run from within an IPython session, and it is able to
determine this, this function will send any existing plots to the
notebook for display.
If yt can't determine if it's inside an IPython session, it will raise
YTNotInsideNotebook.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> pp = ProfilePlot(ds.all_data(), 'density', 'temperature')
>>> pp.show()
"""
if "__IPYTHON__" in dir(builtins):
from IPython.display import display
display(self)
else:
raise YTNotInsideNotebook
@validate_plot
def _repr_html_(self):
"""Return an html representation of the plot object. Will display as a
png for each WindowPlotMPL instance in self.plots"""
ret = ''
unique = set(self.plots.values())
if len(unique) < len(self.plots):
iters = zip(range(len(unique)), sorted(unique))
else:
iters = self.plots.items()
for uid, plot in iters:
with matplotlib_style_context():
img = plot._repr_png_()
img = base64.b64encode(img).decode()
            ret += r'<img style="max-width:100%;max-height:100%;" ' \
                   r'src="data:image/png;base64,{0}"><br>'.format(img)
return ret
def _setup_plots(self):
if self._plot_valid:
return
for f in self.axes:
self.axes[f].cla()
if f in self._plot_text:
self.plots[f].axes.text(self._text_xpos[f], self._text_ypos[f],
self._plot_text[f],
fontproperties=self._font_properties,
**self._text_kwargs[f])
for i, profile in enumerate(self.profiles):
for field, field_data in profile.items():
self.axes[field].plot(np.array(profile.x), np.array(field_data),
label=self.label[i], **self.plot_spec[i])
for profile in self.profiles:
for fname in profile.keys():
axes = self.axes[fname]
xscale, yscale = self._get_field_log(fname, profile)
xtitle, ytitle = self._get_field_title(fname, profile)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_ylabel(ytitle)
axes.set_xlabel(xtitle)
axes.set_ylim(*self.axes.ylim[fname])
axes.set_xlim(*self.axes.xlim)
if fname in self._plot_title:
axes.set_title(self._plot_title[fname])
if any(self.label):
axes.legend(loc="best")
self._set_font_properties()
self._plot_valid = True
@classmethod
def _initialize_instance(cls, obj, profiles, labels, plot_specs, y_log):
obj._plot_title = {}
obj._plot_text = {}
obj._text_xpos = {}
obj._text_ypos = {}
obj._text_kwargs = {}
from matplotlib.font_manager import FontProperties
obj._font_properties = FontProperties(family='stixgeneral', size=18)
obj._font_color = None
obj.profiles = ensure_list(profiles)
obj.x_log = None
obj.y_log = {}
if y_log is not None:
for field, log in y_log.items():
field, = obj.profiles[0].data_source._determine_fields([field])
obj.y_log[field] = log
obj.y_title = {}
obj.x_title = None
obj.label = sanitize_label(labels, len(obj.profiles))
if plot_specs is None:
plot_specs = [dict() for p in obj.profiles]
obj.plot_spec = plot_specs
obj.plots = PlotContainerDict()
obj.figures = FigureContainer(obj.plots)
obj.axes = AxesContainer(obj.plots)
obj._setup_plots()
return obj
@classmethod
def from_profiles(cls, profiles, labels=None, plot_specs=None, y_log=None):
r"""
Instantiate a ProfilePlot object from a list of profiles
created with :func:`~yt.data_objects.profiles.create_profile`.
Parameters
----------
profiles : a profile or list of profiles
A single profile or list of profile objects created with
:func:`~yt.data_objects.profiles.create_profile`.
labels : list of strings
A list of labels for each profile to be overplotted.
Default: None.
plot_specs : list of dicts
A list of dictionaries containing plot keyword
arguments. For example, [dict(color="red", linestyle=":")].
Default: None.
Examples
--------
>>> from yt import simulation
>>> es = simulation("AMRCosmology.enzo", "Enzo")
>>> es.get_time_series()
>>> profiles = []
>>> labels = []
>>> plot_specs = []
>>> for ds in es[-4:]:
... ad = ds.all_data()
... profiles.append(create_profile(ad, ["Density"],
... fields=["Temperature",
... "x-velocity"]))
... labels.append(ds.current_redshift)
... plot_specs.append(dict(linestyle="--", alpha=0.7))
>>>
>>> plot = ProfilePlot.from_profiles(profiles, labels=labels,
... plot_specs=plot_specs)
>>> plot.save()
"""
if labels is not None and len(profiles) != len(labels):
raise RuntimeError("Profiles list and labels list must be the same size.")
if plot_specs is not None and len(plot_specs) != len(profiles):
raise RuntimeError("Profiles list and plot_specs list must be the same size.")
obj = cls.__new__(cls)
return cls._initialize_instance(obj, profiles, labels, plot_specs, y_log)
@invalidate_plot
def set_line_property(self, property, value, index=None):
r"""
Set properties for one or all lines to be plotted.
Parameters
----------
property : str
The line property to be set.
value : str, int, float
The value to set for the line property.
index : int
The index of the profile in the list of profiles to be
changed. If None, change all plotted lines.
Default : None.
Examples
--------
Change all the lines in a plot
plot.set_line_property("linestyle", "-")
Change a single line.
plot.set_line_property("linewidth", 4, index=0)
"""
if index is None:
specs = self.plot_spec
else:
specs = [self.plot_spec[index]]
for spec in specs:
spec[property] = value
return self
@invalidate_plot
def set_log(self, field, log):
"""set a field to log or linear.
Parameters
----------
field : string
the field to set a transform
log : boolean
Log on/off.
"""
if field == "all":
self.x_log = log
for field in list(self.profiles[0].field_data.keys()):
self.y_log[field] = log
else:
field, = self.profiles[0].data_source._determine_fields([field])
if field == self.profiles[0].x_field:
self.x_log = log
elif field in self.profiles[0].field_data:
self.y_log[field] = log
else:
raise KeyError("Field %s not in profile plot!" % (field))
return self
@invalidate_plot
def set_ylabel(self, field, label):
"""Sets a new ylabel for the specified fields
Parameters
----------
field : string
The name of the field that is to be changed.
label : string
The label to be placed on the y-axis
"""
if field == "all":
for field in self.profiles[0].field_data:
self.y_title[field] = label
else:
field, = self.profiles[0].data_source._determine_fields([field])
if field in self.profiles[0].field_data:
self.y_title[field] = label
else:
raise KeyError("Field %s not in profile plot!" % (field))
return self
@invalidate_plot
def set_xlabel(self, label):
"""Sets a new xlabel for all profiles
Parameters
----------
label : string
The label to be placed on the x-axis
"""
self.x_title = label
return self
@invalidate_plot
def set_unit(self, field, unit):
"""Sets a new unit for the requested field
Parameters
----------
field : string
The name of the field that is to be changed.
new_unit : string or Unit object
The name of the new unit.
"""
fd = self.profiles[0].data_source._determine_fields(field)[0]
for profile in self.profiles:
if fd == profile.x_field:
profile.set_x_unit(unit)
elif fd[1] in self.profiles[0].field_map:
profile.set_field_unit(field, unit)
else:
raise KeyError("Field %s not in profile plot!" % (field))
return self
@invalidate_plot
def set_xlim(self, xmin=None, xmax=None):
"""Sets the limits of the bin field
Parameters
----------
xmin : float or None
The new x minimum. Defaults to None, which leaves the xmin
unchanged.
xmax : float or None
The new x maximum. Defaults to None, which leaves the xmax
unchanged.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> pp = yt.ProfilePlot(ds.all_data(), 'density', 'temperature')
>>> pp.set_xlim(1e-29, 1e-24)
>>> pp.save()
"""
self.axes.xlim = (xmin, xmax)
for i, p in enumerate(self.profiles):
if xmin is None:
xmi = p.x_bins.min()
else:
xmi = xmin
if xmax is None:
xma = p.x_bins.max()
else:
xma = xmax
extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
units = {p.x_field: str(p.x.units)}
if self.x_log is None:
logs = None
else:
logs = {p.x_field: self.x_log}
for field in p.field_map.values():
units[field] = str(p.field_data[field].units)
self.profiles[i] = \
create_profile(p.data_source, p.x_field,
n_bins=len(p.x_bins)-1,
fields=list(p.field_map.values()),
weight_field=p.weight_field,
accumulation=p.accumulation,
fractional=p.fractional,
logs=logs,
extrema=extrema, units=units)
return self
@invalidate_plot
def set_ylim(self, field, ymin=None, ymax=None):
"""Sets the plot limits for the specified field we are binning.
Parameters
----------
field : string or field tuple
The field that we want to adjust the plot limits for.
ymin : float or None
The new y minimum. Defaults to None, which leaves the ymin
unchanged.
ymax : float or None
The new y maximum. Defaults to None, which leaves the ymax
unchanged.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> pp = yt.ProfilePlot(ds.all_data(), 'density', ['temperature', 'x-velocity'])
>>> pp.set_ylim('temperature', 1e4, 1e6)
>>> pp.save()
"""
if field == 'all':
fields = list(self.axes.keys())
else:
fields = ensure_list(field)
for profile in self.profiles:
for field in profile.data_source._determine_fields(fields):
if field in profile.field_map:
field = profile.field_map[field]
self.axes.ylim[field] = (ymin, ymax)
# Continue on to the next profile.
break
return self
def _set_font_properties(self):
for f in self.plots:
self.plots[f]._set_font_properties(
self._font_properties, self._font_color)
def _get_field_log(self, field_y, profile):
yfi = profile.field_info[field_y]
if self.x_log is None:
x_log = profile.x_log
else:
x_log = self.x_log
if field_y in self.y_log:
y_log = self.y_log[field_y]
else:
y_log = yfi.take_log
scales = {True: 'log', False: 'linear'}
return scales[x_log], scales[y_log]
def _get_field_label(self, field, field_info, field_unit, fractional=False):
field_unit = field_unit.latex_representation()
field_name = field_info.display_name
if isinstance(field, tuple): field = field[1]
if field_name is None:
            field_name = r'$\rm{'+field.replace('_', r'\ ').title()+r'}$'
elif field_name.find('$') == -1:
            field_name = field_name.replace(' ', r'\ ')
field_name = r'$\rm{'+field_name+r'}$'
if fractional:
label = field_name + r'$\rm{\ Probability\ Density}$'
elif field_unit is None or field_unit == '':
label = field_name
else:
label = field_name+r'$\ \ ('+field_unit+r')$'
return label
def _get_field_title(self, field_y, profile):
field_x = profile.x_field
xfi = profile.field_info[field_x]
yfi = profile.field_info[field_y]
x_unit = profile.x.units
y_unit = profile.field_units[field_y]
fractional = profile.fractional
x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)
y_title = self.y_title.get(field_y, None) or \
self._get_field_label(field_y, yfi, y_unit, fractional)
return (x_title, y_title)
@invalidate_plot
def annotate_title(self, title, field='all'):
r"""Set a title for the plot.
Parameters
----------
title : str
The title to add.
field : str or list of str
The field name for which title needs to be set.
Examples
--------
>>> # To set title for all the fields:
>>> plot.annotate_title("This is a Profile Plot")
>>> # To set title for specific fields:
>>> plot.annotate_title("Profile Plot for Temperature", "temperature")
>>> # Setting same plot title for both the given fields
>>> plot.annotate_title("Profile Plot: Temperature-Dark Matter Density",
["temperature", "dark_matter_density"])
"""
if field == 'all':
fields = list(self.axes.keys())
else:
fields = ensure_list(field)
for profile in self.profiles:
for field in profile.data_source._determine_fields(fields):
if field in profile.field_map:
field = profile.field_map[field]
self._plot_title[field] = title
return self
@invalidate_plot
def annotate_text(self, xpos=0.0, ypos=0.0, text=None, field='all', **text_kwargs):
r"""Allow the user to insert text onto the plot
The x-position and y-position must be given as well as the text string.
Add *text* to plot at location *xpos*, *ypos* in plot coordinates for
the given fields or by default for all fields.
(see example below).
Parameters
----------
xpos : float
Position on plot in x-coordinates.
ypos : float
Position on plot in y-coordinates.
text : str
The text to insert onto the plot.
field : str or tuple
The name of the field to add text to.
text_kwargs : dict
Dictionary of text keyword arguments to be passed to matplotlib
>>> import yt
>>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> my_galaxy = ds.disk(ds.domain_center, [0.0, 0.0, 1.0], 10*kpc, 3*kpc)
>>> plot = yt.ProfilePlot(my_galaxy, "density", ["temperature"])
>>> # Annotate text for all the fields
>>> plot.annotate_text(1e-26, 1e5, "This is annotated text in the plot area.")
>>> plot.save()
>>> # Annotate text for a given field
>>> plot.annotate_text(1e-26, 1e5, "Annotated text", "Temperature")
>>> plot.save()
>>> # Annotate text for multiple fields
>>> fields = ["temperature", "density"]
>>> plot.annotate_text(1e-26, 1e5, "Annotated text", fields)
>>> plot.save()
"""
if field == 'all':
fields = list(self.axes.keys())
else:
fields = ensure_list(field)
for profile in self.profiles:
for field in profile.data_source._determine_fields(fields):
if field in profile.field_map:
field = profile.field_map[field]
self._plot_text[field] = text
self._text_xpos[field] = xpos
self._text_ypos[field] = ypos
self._text_kwargs[field] = text_kwargs
return self
class PhasePlot(ImagePlotContainer):
r"""
Create a 2d profile (phase) plot from a data source or from
profile object created with
`yt.data_objects.profiles.create_profile`.
Given a data object (all_data, region, sphere, etc.), an x field,
y field, and z field (or fields), this will create a two-dimensional
profile of the average (or total) value of the z field in bins of the
x and y fields.
Parameters
----------
data_source : YTSelectionContainer Object
The data object to be profiled, such as all_data, region, or
sphere. If a dataset is passed in instead, an all_data data object
is generated internally from the dataset.
x_field : str
The x binning field for the profile.
y_field : str
The y binning field for the profile.
z_fields : str or list
The field or fields to be profiled.
weight_field : str
The weight field for calculating weighted averages. If None,
the profile values are the sum of the field values within the bin.
Otherwise, the values are a weighted average.
Default : "cell_mass".
x_bins : int
The number of bins in x field for the profile.
Default: 128.
y_bins : int
The number of bins in y field for the profile.
Default: 128.
accumulation : bool or list of bools
If True, the profile values for a bin n are the cumulative sum of
        all the values from bin 0 to n. If -True (i.e. -1), the sum is reversed so
that the value for bin n is the cumulative sum from bin N (total bins)
to n. A list of values can be given to control the summation in each
dimension independently.
Default: False.
fractional : If True the profile values are divided by the sum of all
the profile data such that the profile represents a probability
distribution function.
fontsize : int
Font size for all text in the plot.
Default: 18.
figure_size : int
Size in inches of the image.
Default: 8 (8x8)
Examples
--------
>>> import yt
>>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
>>> ad = ds.all_data()
>>> plot = PhasePlot(ad, "density", "temperature", ["cell_mass"],
... weight_field=None)
>>> plot.save()
>>> # Change plot properties.
>>> plot.set_cmap("cell_mass", "jet")
>>> plot.set_zlim("cell_mass", 1e8, 1e13)
>>> plot.annotate_title("This is a phase plot")
"""
x_log = None
y_log = None
plot_title = None
_plot_valid = False
_profile_valid = False
_plot_type = 'Phase'
_xlim = (None, None)
_ylim = (None, None)
def __init__(self, data_source, x_field, y_field, z_fields,
weight_field="cell_mass", x_bins=128, y_bins=128,
accumulation=False, fractional=False,
fontsize=18, figure_size=8.0):
data_source = data_object_or_all_data(data_source)
if isinstance(data_source.ds, YTProfileDataset):
profile = data_source.ds.profile
else:
profile = create_profile(
data_source,
[x_field, y_field],
ensure_list(z_fields),
n_bins=[x_bins, y_bins],
weight_field=weight_field,
accumulation=accumulation,
fractional=fractional)
type(self)._initialize_instance(self, data_source, profile, fontsize,
figure_size)
@classmethod
def _initialize_instance(cls, obj, data_source, profile, fontsize,
figure_size):
obj.plot_title = {}
obj.z_log = {}
obj.z_title = {}
obj._initfinished = False
obj.x_log = None
obj.y_log = None
obj._plot_text = {}
obj._text_xpos = {}
obj._text_ypos = {}
obj._text_kwargs = {}
obj._profile = profile
obj._profile_valid = True
obj._xlim = (None, None)
obj._ylim = (None, None)
super(PhasePlot, obj).__init__(data_source, figure_size, fontsize)
obj._setup_plots()
obj._initfinished = True
return obj
def _get_field_title(self, field_z, profile):
field_x = profile.x_field
field_y = profile.y_field
xfi = profile.field_info[field_x]
yfi = profile.field_info[field_y]
zfi = profile.field_info[field_z]
x_unit = profile.x.units
y_unit = profile.y.units
z_unit = profile.field_units[field_z]
fractional = profile.fractional
x_label, y_label, z_label = self._get_axes_labels(field_z)
x_title = x_label or self._get_field_label(field_x, xfi, x_unit)
y_title = y_label or self._get_field_label(field_y, yfi, y_unit)
z_title = z_label or self._get_field_label(field_z, zfi, z_unit,
fractional)
return (x_title, y_title, z_title)
def _get_field_label(self, field, field_info, field_unit, fractional=False):
field_unit = field_unit.latex_representation()
field_name = field_info.display_name
if isinstance(field, tuple): field = field[1]
if field_name is None:
            field_name = r'$\rm{'+field.replace('_', r'\ ').title()+r'}$'
elif field_name.find('$') == -1:
            field_name = field_name.replace(' ', r'\ ')
field_name = r'$\rm{'+field_name+r'}$'
if fractional:
label = field_name + r'$\rm{\ Probability\ Density}$'
elif field_unit is None or field_unit == '':
label = field_name
else:
label = field_name+r'$\ \ ('+field_unit+r')$'
return label
def _get_field_log(self, field_z, profile):
zfi = profile.field_info[field_z]
if self.x_log is None:
x_log = profile.x_log
else:
x_log = self.x_log
if self.y_log is None:
y_log = profile.y_log
else:
y_log = self.y_log
if field_z in self.z_log:
z_log = self.z_log[field_z]
else:
z_log = zfi.take_log
scales = {True: 'log', False: 'linear'}
return scales[x_log], scales[y_log], scales[z_log]
def _recreate_frb(self):
# needed for API compatibility with PlotWindow
pass
@property
def profile(self):
if not self._profile_valid:
self._recreate_profile()
return self._profile
@property
def fields(self):
return list(self.plots.keys())
def _setup_plots(self):
if self._plot_valid:
return
for f, data in self.profile.items():
fig = None
axes = None
cax = None
draw_colorbar = True
draw_axes = True
zlim = (None, None)
xlim = self._xlim
ylim = self._ylim
if f in self.plots:
draw_colorbar = self.plots[f]._draw_colorbar
draw_axes = self.plots[f]._draw_axes
zlim = (self.plots[f].zmin, self.plots[f].zmax)
if self.plots[f].figure is not None:
fig = self.plots[f].figure
axes = self.plots[f].axes
cax = self.plots[f].cax
x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
x_title, y_title, z_title = self._get_field_title(f, self.profile)
if zlim == (None, None):
if z_scale == 'log':
positive_values = data[data > 0.0]
if len(positive_values) == 0:
mylog.warning("Profiled field %s has no positive "
"values. Max = %f." %
(f, np.nanmax(data)))
mylog.warning("Switching to linear colorbar scaling.")
zmin = np.nanmin(data)
z_scale = 'linear'
self._field_transform[f] = linear_transform
else:
zmin = positive_values.min()
self._field_transform[f] = log_transform
else:
zmin = np.nanmin(data)
self._field_transform[f] = linear_transform
zlim = [zmin, np.nanmax(data)]
font_size = self._font_properties.get_size()
f = self.profile.data_source._determine_fields(f)[0]
        # if this is a Particle Phase Plot AND we are using a single color,
# override the colorbar here.
splat_color = getattr(self, "splat_color", None)
if splat_color is not None:
cmap = matplotlib.colors.ListedColormap(splat_color, 'dummy')
else:
cmap = self._colormaps[f]
self.plots[f] = PhasePlotMPL(self.profile.x, self.profile.y, data,
x_scale, y_scale, z_scale,
cmap, zlim,
self.figure_size, font_size,
fig, axes, cax)
self.plots[f]._toggle_axes(draw_axes)
self.plots[f]._toggle_colorbar(draw_colorbar)
self.plots[f].axes.xaxis.set_label_text(x_title)
self.plots[f].axes.yaxis.set_label_text(y_title)
self.plots[f].cax.yaxis.set_label_text(z_title)
self.plots[f].axes.set_xlim(xlim)
self.plots[f].axes.set_ylim(ylim)
color = self._background_color[f]
if MPL_VERSION < LooseVersion("2.0.0"):
self.plots[f].axes.set_axis_bgcolor(color)
else:
self.plots[f].axes.set_facecolor(color)
if f in self._plot_text:
self.plots[f].axes.text(self._text_xpos[f], self._text_ypos[f],
self._plot_text[f],
fontproperties=self._font_properties,
**self._text_kwargs[f])
if f in self.plot_title:
self.plots[f].axes.set_title(self.plot_title[f])
# x-y axes minorticks
if f not in self._minorticks:
self._minorticks[f] = True
if self._minorticks[f]:
self.plots[f].axes.minorticks_on()
else:
self.plots[f].axes.minorticks_off()
# colorbar minorticks
if f not in self._cbar_minorticks:
self._cbar_minorticks[f] = True
if self._cbar_minorticks[f]:
if self._field_transform[f] == linear_transform:
self.plots[f].cax.minorticks_on()
elif MPL_VERSION < LooseVersion("3.0.0"):
# before matplotlib 3 log-scaled colorbars internally used
# a linear scale going from zero to one and did not draw
# minor ticks. Since we want minor ticks, calculate
# where the minor ticks should go in this linear scale
# and add them manually.
vmin = np.float64(self.plots[f].cb.norm.vmin)
vmax = np.float64(self.plots[f].cb.norm.vmax)
mticks = self.plots[f].image.norm(
get_log_minorticks(vmin, vmax))
self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)
else:
self.plots[f].cax.minorticks_off()
self._set_font_properties()
# if this is a particle plot with one color only, hide the cbar here
if hasattr(self, "use_cbar") and not self.use_cbar:
self.plots[f].hide_colorbar()
self._plot_valid = True
@classmethod
def from_profile(cls, profile, fontsize=18, figure_size=8.0):
r"""
Instantiate a PhasePlot object from a profile object created
with :func:`~yt.data_objects.profiles.create_profile`.
Parameters
----------
profile : An instance of :class:`~yt.data_objects.profiles.ProfileND`
A single profile object.
fontsize : float
The fontsize to use, in points.
figure_size : float
The figure size to use, in inches.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> extrema = {
... 'density': (1e-31, 1e-24),
... 'temperature': (1e1, 1e8),
... 'cell_mass': (1e-6, 1e-1),
... }
>>> profile = yt.create_profile(ds.all_data(), ['density', 'temperature'],
... fields=['cell_mass'],extrema=extrema,
... fractional=True)
>>> ph = yt.PhasePlot.from_profile(profile)
>>> ph.save()
"""
obj = cls.__new__(cls)
data_source = profile.data_source
return cls._initialize_instance(obj, data_source, profile, fontsize,
figure_size)
def annotate_text(self, xpos=0.0, ypos=0.0, text=None, **text_kwargs):
r"""
Allow the user to insert text onto the plot
The x-position and y-position must be given as well as the text string.
        Add *text* to plot at location *xpos*, *ypos* in plot coordinates
(see example below).
Parameters
----------
field : str or tuple
The name of the field to add text to.
xpos : float
Position on plot in x-coordinates.
ypos : float
Position on plot in y-coordinates.
text : str
The text to insert onto the plot.
text_kwargs : dict
Dictionary of text keyword arguments to be passed to matplotlib
>>> plot.annotate_text(1e-15, 5e4, "Hello YT")
"""
for f in self.data_source._determine_fields(list(self.plots.keys())):
if self.plots[f].figure is not None and text is not None:
self.plots[f].axes.text(xpos, ypos, text,
fontproperties=self._font_properties,
**text_kwargs)
self._plot_text[f] = text
self._text_xpos[f] = xpos
self._text_ypos[f] = ypos
self._text_kwargs[f] = text_kwargs
return self
@validate_plot
def save(self, name=None, suffix=None, mpl_kwargs=None):
r"""
Saves a 2d profile plot.
Parameters
----------
name : str
The output file keyword.
suffix : string
Specify the image type by its suffix. If not specified, the output
type will be inferred from the filename. Defaults to PNG.
mpl_kwargs : dict
A dict of keyword arguments to be passed to matplotlib.
>>> plot.save(mpl_kwargs={'bbox_inches':'tight'})
"""
names = []
if not self._plot_valid:
self._setup_plots()
if mpl_kwargs is None:
mpl_kwargs = {}
if name is None:
name = str(self.profile.ds)
name = os.path.expanduser(name)
xfn = self.profile.x_field
yfn = self.profile.y_field
if isinstance(xfn, tuple):
xfn = xfn[1]
if isinstance(yfn, tuple):
yfn = yfn[1]
for f in self.profile.field_data:
_f = f
if isinstance(f, tuple):
_f = _f[1]
middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
splitname = os.path.split(name)
if splitname[0] != '' and not os.path.isdir(splitname[0]):
os.makedirs(splitname[0])
if os.path.isdir(name) and name != str(self.profile.ds):
prefix = name + (os.sep if name[-1] != os.sep else '')
prefix += str(self.profile.ds)
else:
prefix = name
if suffix is None:
suffix = get_image_suffix(name)
if suffix != '':
for k, v in self.plots.items():
names.append(v.save(name, mpl_kwargs))
return names
else:
suffix = "png"
fn = "%s_%s.%s" % (prefix, middle, suffix)
names.append(fn)
self.plots[f].save(fn, mpl_kwargs)
return names
@invalidate_plot
def set_font(self, font_dict=None):
"""
Set the font and font properties.
Parameters
----------
font_dict : dict
A dict of keyword parameters to be passed to
:class:`matplotlib.font_manager.FontProperties`.
Possible keys include:
* family - The font family. Can be serif, sans-serif, cursive,
'fantasy', or 'monospace'.
* style - The font style. Either normal, italic or oblique.
* color - A valid color string like 'r', 'g', 'red', 'cobalt',
and 'orange'.
* variant - Either normal or small-caps.
* size - Either a relative value of xx-small, x-small, small,
medium, large, x-large, xx-large or an absolute font size, e.g. 12
* stretch - A numeric value in the range 0-1000 or one of
ultra-condensed, extra-condensed, condensed, semi-condensed,
normal, semi-expanded, expanded, extra-expanded or ultra-expanded
* weight - A numeric value in the range 0-1000 or one of ultralight,
light, normal, regular, book, medium, roman, semibold, demibold,
demi, bold, heavy, extra bold, or black
See the matplotlib font manager API documentation for more details.
https://matplotlib.org/api/font_manager_api.html
Notes
-----
Mathtext axis labels will only obey the `size` and `color` keyword.
Examples
--------
This sets the font to be 24-pt, blue, sans-serif, italic, and
bold-face.
>>> prof = ProfilePlot(ds.all_data(), 'density', 'temperature')
>>> slc.set_font({'family':'sans-serif', 'style':'italic',
... 'weight':'bold', 'size':24, 'color':'blue'})
"""
from matplotlib.font_manager import FontProperties
if font_dict is None:
font_dict = {}
if 'color' in font_dict:
self._font_color = font_dict.pop('color')
# Set default values if the user does not explicitly set them.
# this prevents reverting to the matplotlib defaults.
font_dict.setdefault('family', 'stixgeneral')
font_dict.setdefault('size', 18)
self._font_properties = \
FontProperties(**font_dict)
return self
@invalidate_plot
def set_title(self, field, title):
"""Set a title for the plot.
Parameters
----------
field : str
The z field of the plot to add the title.
title : str
The title to add.
Examples
--------
>>> plot.set_title("cell_mass", "This is a phase plot")
"""
self.plot_title[self.data_source._determine_fields(field)[0]] = title
return self
@invalidate_plot
def annotate_title(self, title):
"""Set a title for the plot.
Parameters
----------
title : str
The title to add.
Examples
--------
>>> plot.annotate_title("This is a phase plot")
"""
for f in self._profile.field_data:
if isinstance(f, tuple):
f = f[1]
self.plot_title[self.data_source._determine_fields(f)[0]] = title
return self
@invalidate_plot
def reset_plot(self):
self.plots = {}
return self
@invalidate_plot
def set_log(self, field, log):
"""set a field to log or linear.
Parameters
----------
field : string
the field to set a transform
log : boolean
Log on/off.
"""
p = self._profile
if field == "all":
self.x_log = log
self.y_log = log
for field in p.field_data:
self.z_log[field] = log
self._profile_valid = False
else:
field, = self.profile.data_source._determine_fields([field])
if field == p.x_field:
self.x_log = log
self._profile_valid = False
elif field == p.y_field:
self.y_log = log
self._profile_valid = False
elif field in p.field_data:
self.z_log[field] = log
else:
raise KeyError("Field %s not in phase plot!" % (field))
return self
@invalidate_plot
def set_unit(self, field, unit):
"""Sets a new unit for the requested field
Parameters
----------
field : string
The name of the field that is to be changed.
new_unit : string or Unit object
The name of the new unit.
"""
fd = self.data_source._determine_fields(field)[0]
if fd == self.profile.x_field:
self.profile.set_x_unit(unit)
elif fd == self.profile.y_field:
self.profile.set_y_unit(unit)
elif fd in self.profile.field_data.keys():
self.profile.set_field_unit(field, unit)
self.plots[field].zmin, self.plots[field].zmax = (None, None)
else:
raise KeyError("Field %s not in phase plot!" % (field))
return self
@invalidate_plot
@invalidate_profile
def set_xlim(self, xmin=None, xmax=None):
"""Sets the limits of the x bin field
Parameters
----------
xmin : float or None
The new x minimum in the current x-axis units. Defaults to None,
which leaves the xmin unchanged.
xmax : float or None
The new x maximum in the current x-axis units. Defaults to None,
which leaves the xmax unchanged.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> pp = yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')
>>> pp.set_xlim(1e-29, 1e-24)
>>> pp.save()
"""
p = self._profile
if xmin is None:
xmin = p.x_bins.min()
elif not hasattr(xmin, 'units'):
xmin = self.ds.quan(xmin, p.x_bins.units)
if xmax is None:
xmax = p.x_bins.max()
elif not hasattr(xmax, 'units'):
xmax = self.ds.quan(xmax, p.x_bins.units)
self._xlim = (xmin, xmax)
return self
@invalidate_plot
@invalidate_profile
def set_ylim(self, ymin=None, ymax=None):
"""Sets the plot limits for the y bin field.
Parameters
----------
ymin : float or None
The new y minimum in the current y-axis units. Defaults to None,
which leaves the ymin unchanged.
ymax : float or None
The new y maximum in the current y-axis units. Defaults to None,
which leaves the ymax unchanged.
Examples
--------
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> pp = yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')
>>> pp.set_ylim(1e4, 1e6)
>>> pp.save()
"""
p = self._profile
if ymin is None:
ymin = p.y_bins.min()
elif not hasattr(ymin, 'units'):
ymin = self.ds.quan(ymin, p.y_bins.units)
if ymax is None:
ymax = p.y_bins.max()
elif not hasattr(ymax, 'units'):
ymax = self.ds.quan(ymax, p.y_bins.units)
self._ylim = (ymin, ymax)
return self
def _recreate_profile(self):
p = self._profile
units = {p.x_field: str(p.x.units),
p.y_field: str(p.y.units)}
zunits = dict((field, str(p.field_units[field])) for field in p.field_units)
extrema = {p.x_field: self._xlim, p.y_field: self._ylim}
if self.x_log is not None or self.y_log is not None:
logs = {}
else:
logs = None
if self.x_log is not None:
logs[p.x_field] = self.x_log
if self.y_log is not None:
logs[p.y_field] = self.y_log
deposition = getattr(p, "deposition", None)
additional_kwargs = {'accumulation': p.accumulation,
'fractional': p.fractional,
'deposition': deposition}
self._profile = create_profile(
p.data_source,
[p.x_field, p.y_field],
list(p.field_map.values()),
n_bins=[len(p.x_bins)-1, len(p.y_bins)-1],
weight_field=p.weight_field,
units=units,
extrema=extrema,
logs=logs,
**additional_kwargs)
for field in zunits:
self._profile.set_field_unit(field, zunits[field])
self._profile_valid = True
class PhasePlotMPL(ImagePlotMPL):
"""A container for a single matplotlib figure and axes for a PhasePlot"""
def __init__(self, x_data, y_data, data,
x_scale, y_scale, z_scale, cmap,
zlim, figure_size, fontsize, figure, axes, cax):
self._initfinished = False
self._draw_colorbar = True
self._draw_axes = True
self._figure_size = figure_size
# Compute layout
fontscale = float(fontsize) / 18.0
if fontscale < 1.0:
fontscale = | np.sqrt(fontscale) | numpy.sqrt |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def sigmoid(h):
return 1/(1+np.exp(-h))
def binary_cross_entropy(y,p):
return -np.sum(y*np.log(p)+(1-y)*np.log(1-p))
def binary_classification_rate(y,p):
return np.mean(np.round(p)==y)
def softmax(H):
    eH=np.exp(H-H.max(axis=1,keepdims=True))  # subtract the row max for numerical stability; the result is unchanged
    return eH/eH.sum(axis=1,keepdims=True)
def multi_classification_rate(y,P):
return np.mean(y==P.argmax(axis=1))
def multi_cross_entropy(Y,P):
return -np.sum(Y*np.log(P))
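# Quick sanity checks (illustrative): softmax(np.array([[0., 0.]])) gives
# [[0.5, 0.5]], and with a one-hot Y, multi_cross_entropy(Y, P) reduces to
# -log(P) summed over the true-class entries.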
#LOGISTIC FIT FUNCTION
def fit(X,y,eta=0.001,epoch=2000,lmda_l2=0,lmda_l1=0):
K=len(set(y))
N=len(y)
D=X.shape[1]
#BINARY CLASSIFICATION
if K==2:
w=np.random.randn(D+1)
PHI=np.column_stack((np.array([[1]*N]).T,X))
J=[]
        for i in range(epoch):
p=sigmoid(PHI.dot(w))
J.append(binary_cross_entropy(y,p))
w-=eta*PHI.T.dot(p-y)
p=sigmoid(PHI.dot(w))
jplot=plt.figure()
plt.title('Cross Entropy Plot for Logistic Regression')
plt.plot(J)
cls_rate=binary_classification_rate(y,p)
clrt="Classification Rate is: "
roc_matrix=np.column_stack((p,np.round(p),y))
roc_matrix=roc_matrix[roc_matrix[:,0].argsort()[::-1]]
tp=np.cumsum((roc_matrix[:,1]==1)& (roc_matrix[:,2]==1))/np.sum(roc_matrix[:,2]==1)
        fp=np.cumsum((roc_matrix[:,1]==1) & (roc_matrix[:,2]==0))/np.sum(roc_matrix[:,2]==0)
import numpy as np
import pytest
from py3nj import wigner, wigner3j
rng = np.random.RandomState(0)
# precalculated 3j symbols. Input, result
THREE_J = (
((1, 1, 0, 0, 0, 0), -np.sqrt(1.0 / 3)),
((0, 1, 1, 0, 0, 0), -np.sqrt(1.0 / 3)),
((2, 2, 0, 0, 0, 0), np.sqrt(1.0 / 5)),
((2, 1, 1, 0, 0, 0), np.sqrt(2.0 / 15)),
((2, 1, 1, 0, 0, 0), np.sqrt(2.0 / 15)),
((0, 1, 1, 0, -1, 1), np.sqrt(1.0 / 3)),
((0.5, 0.5, 1.0, -0.5, 0.5, 0), np.sqrt(1.0 / 6.0)),
((0.5, 0.5, 1.0, 0.5, -0.5, 0), np.sqrt(1.0 / 6.0)),
((0.5, 0.5, 0.0, 0.5, -0.5, 0), np.sqrt(1.0 / 2.0)),
((0.5, 0.5, 0.0, -0.5, 0.5, 0), -np.sqrt(1.0 / 2.0)),
((1, 2, 3, 0, 2, -2), -np.sqrt(1.0 / 21)),
((1, 2, 3, 1, 2, -3), np.sqrt(1.0 / 7)),
((4, 0, 0, 0, 0, 0), 0),
)
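# NOTE (assumed convention, matching the doubling in test_drc3jj below): py3nj
# takes doubled (two_j) integer arguments, so the symbol (1 1 0; 0 0 0) =
# -1/sqrt(3) would be queried as wigner3j(2, 2, 0, 0, 0, 0).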
def test_drc3jj():
for three_j, result in THREE_J:
three_j = (np.array(three_j) * 2).astype(int)
l, thrcof = wigner._drc3jj(three_j[1], three_j[2], three_j[4], three_j[5])
if three_j[0] in l:
assert np.allclose(thrcof[l == three_j[0]], result)
else:
assert np.allclose(thrcof, 1)
def test_invalid():
with pytest.raises(ValueError):
l, thrcof = wigner._drc3jj(1, 3, 0, 0)
with pytest.raises(ValueError):
l, thrcof = wigner._drc3jj(1, 3, 4, 0)
@pytest.mark.parametrize("half_integer", [1, 0])
def test_drc3jj_vec(half_integer):
n = 1000
l2 = rng.randint(0, 10, size=n) * 2 + half_integer
l3 = rng.randint(0, 10, size=n) * 2 + half_integer
m2 = np.zeros(n, dtype=int)
m3 = np.zeros(n, dtype=int)
for i in range(n):
if l2[i] > 0:
m2[i] = rng.randint(-l2[i], l2[i] + 1)
if l3[i] > 0:
m3[i] = rng.randint(-l3[i], l3[i] + 1)
l, actual = wigner.drc3jj(l2, l3, m2, m3)
for _ in range(2):
l, actual = wigner.drc3jj(l2, l3, m2, m3)
for i in range(n):
if (actual[i] != 0).any():
l, expected = wigner._drc3jj(l2[i], l3[i], m2[i], m3[i])
            assert np.allclose(actual[i, l], expected)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
np.random.seed(102)
class TestNanmedian(unittest.TestCase):
def setUp(self):
single_axis_shape = (120)
multi_axis_shape = (2, 3, 4, 5)
self.fake_data = {
"single_axis_normal":
np.random.uniform(-1, 1, single_axis_shape).astype(np.float32),
"multi_axis_normal":
np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32),
"single_axis_all_nan": np.full(single_axis_shape, np.nan),
"multi_axis_all_nan": np.full(multi_axis_shape, np.nan),
}
single_partial_nan = self.fake_data["single_axis_normal"].copy()
single_partial_nan[single_partial_nan > 0] = np.nan
multi_partial_nan = self.fake_data["multi_axis_normal"].copy()
multi_partial_nan[multi_partial_nan > 0] = np.nan
self.fake_data["single_axis_partial_nan"] = single_partial_nan
self.fake_data["multi_axis_partial_nan"] = multi_partial_nan
row_data = np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32)
row_data[:, :, :, 0] = np.nan
row_data[:, :, :2, 1] = np.nan
row_data[:, :, 2:, 2] = np.nan
self.fake_data["row_nan_even"] = row_data
self.fake_data["row_nan_float64"] = row_data.astype(np.float64)
self.fake_data["row_nan_int64"] = row_data.astype(np.int64)
self.fake_data["row_nan_int32"] = row_data.astype(np.int32)
col_data = np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32)
col_data[:, :, 0, :] = np.nan
col_data[:, :, 1, :3] = np.nan
col_data[:, :, 2, 3:] = np.nan
self.fake_data["col_nan_odd"] = col_data
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
self.axis_candiate_list = [
None, 0, 2, -1, -2, (1, 2), [0, -1], [0, 1, 3], (1, 2, 3),
[0, 2, 1, 3]
]
def test_api_static(self):
data = self.fake_data["col_nan_odd"]
paddle.enable_static()
np_res = np.nanmedian(data, keepdims=True)
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', data.shape)
out1 = paddle.nanmedian(x, keepdim=True)
out2 = paddle.tensor.nanmedian(x, keepdim=True)
out3 = paddle.tensor.stat.nanmedian(x, keepdim=True)
axis = np.arange(len(data.shape)).tolist()
out4 = paddle.nanmedian(x, axis=axis, keepdim=True)
out5 = paddle.nanmedian(x, axis=tuple(axis), keepdim=True)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': data},
fetch_list=[out1, out2, out3, out4, out5])
for out in res:
self.assertTrue(np.allclose(np_res, out, equal_nan=True))
def test_api_dygraph(self):
paddle.disable_static(self.place)
def clean_axis_numpy(axis, shape_len):
if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list):
for k in range(len(axis)):
if axis[k] < 0:
axis[k] += shape_len
axis = set(axis)
return axis
def test_data_case(data):
for keep_dim in [False, True]:
if np.isnan(data).all() and keep_dim:
np_ver = np.version.version.split('.')
if int(np_ver[0]) < 1 or int(np_ver[1]) <= 20:
print(
"This numpy version does not support all nan elements when keepdim is True"
)
continue
                np_res = np.nanmedian(data, keepdims=keep_dim)
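                # NOTE: the source is truncated here; a hedged reconstruction of
                # the remaining check (comparing against paddle.nanmedian in
                # dygraph mode, mirroring the static-graph test above) could be:
                pd_res = paddle.nanmedian(
                    paddle.to_tensor(data), keepdim=keep_dim)
                self.assertTrue(
                    np.allclose(np_res, pd_res.numpy(), equal_nan=True))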