import numpy
import matplotlib.pyplot as plt
import tellurium as te
from rrplugins import Plugin
auto = Plugin("tel_auto2000")
from te_bifurcation import model2te, run_bf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import ScalarFormatter
sf = ScalarFormatter()
sf.set_scientific(False)
import re
import seaborn as sns
import os
from pickle import dump, load
from sympy import *
import sobol_seq
import pickle
# Define symbolic variables for symbolic Jacobian
R, r, C1, C2, mR1, mR2, K, K1, K2, m, a, b, mu, ksi, ksm, ki0, ki1, km0, km1, k, sR, a1, a2, b1, b2, A = symbols('R r C1 C2 mR1 mR2 K K1 K2 m a b mu ksi ksm ki0 ki1 km0 km1 k s_R a1 a2 b1 b2 A', positive=True, real=True)
c1A, c1B, c2, rev, koff, kR, sR0, mu, g = symbols('c1A c1B c2 rev koff kR sR0 mu g', positive=True, real=True)
gA, gB, srA, srB, a1A, a1B, b1A, b1B, b2A, b2B, K1A, K1B, K2A, K2B = symbols('gA gB srA srB a1A a1B b1A b1B b2A b2B K1A K1B K2A K2B', positive=True, real=True)
# Samples of parameter values
n = int(1E2) # Production run 1E5
ss = sobol_seq.i4_sobol_generate(7, int(n))
l = np.power(2, -3 + (4+3)*ss[:,:7])
a1Asp, a1Bsp, b1Asp, b1Bsp = l[:,0], l[:,1], l[:,2], l[:,3]
KAsp = 10**(ss[:,-3]*(np.log10(70000)-np.log10(7)) + np.log10(7))
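# A quick, illustrative sanity check (not part of the production run): the Sobol
# points in [0, 1] are mapped onto log-uniform ranges above, so the rate constants
# span roughly 2**-3 to 2**4 and KAsp spans roughly 7 to 7e4.
print('Sampled ranges:', l.min(), l.max(), KAsp.min(), KAsp.max())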
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.integrate import solve_ivp
import math
plt.style.use('ggplot')
def plot_analytical(numerical=False,num_result=None):
if numerical:
z_position= num_result.altitude #altitude
else:
z_position=np.linspace(0,100000,1001)
v_position=np.zeros_like(z_position) #velocity
energy_position=np.zeros_like(z_position) #energy
dv_dz_position=np.zeros_like(z_position) #dv/dz
dE_dz_position=np.zeros_like(z_position) #dE/dz
test_v0 = 21000 #velocity
test_theta0 = 45 #angle in degrees
test_r0 = 10 #radius
test_density=3000 #density
test_m0 = test_density*4/3*np.pi*test_r0**3 #mass
test_list0= np.array([test_v0, test_m0, test_theta0, test_r0]) #test example
def v_z(test_list, z, Cd=1, rho0=1.2, H=8000, angle = True):
# v(z) Calculating velocity from altitude.
rhoa = rho0*np.exp(-z/H) # atmospheric density
A = np.pi*test_list[3]**2 # area
# angle/radian transformation
if angle:
radian=test_list[2]*np.pi/180.
else:
radian=test_list[2]
return test_list[0]*np.exp(-H*Cd*rho0*np.exp(-z/H)*A/(2*test_list[1]*np.sin(radian)))
def dv_dz(test_list, z, Cd=1, rho0=1.2, H=8000, angle = True):
# dv/dz Calculating derivative of velocity from altitude
rhoa = rho0*np.exp(-z/H) # atmospheric density
A = np.pi*test_list[3]**2 # area
# angle/radian transformation
if angle:
radian=test_list[2]*np.pi/180.
else:
radian=test_list[2]
return v_z(test_list, z)*Cd*rho0*np.exp(-z/H)*A/(2*test_list[1]*np.sin(radian))
def dE_dz(test_list, z, Cd=1, rho0=1.2, H=8000, angle = True):
# dE/dz Calculating derivative of energy from altitude - chain rule
rhoa = rho0*np.exp(-z/H) # atmospheric density
A = np.pi*test_list[3]**2 # area
# angle/radian transformation
if angle:
radian=test_list[2]*np.pi/180.
else:
radian=test_list[2]
return dv_dz(test_list, z)*v_z(test_list, z)*test_list[1]/4.184e9
v_position, dE_dz_position = v_z(test_list0, z_position),dE_dz(test_list0, z_position)
fig = plt.figure(figsize=(8, 6))
ax1 = plt.subplot(111)
ax1.plot(v_position, z_position, label='analytical data')
if numerical:
ax1.plot(num_result.velocity,num_result.altitude, label= 'numerical data')
ax1.set_title('Analytical Solution vs. Numerical Solution\n Velocity-Altitude', fontsize=16)
else:
ax1.set_title('Analytical Solution\n Velocity-Altitude', fontsize=16)
ax1.set_xlabel(r'$v, velocity$', fontsize=14)
ax1.set_ylabel(r'$z, Altitude$', fontsize=14)
ax1.legend(loc='best', fontsize=14)
fig = plt.figure(figsize=(8, 6))
ax2 = plt.subplot(111)
ax2.plot(dE_dz_position, z_position, label='analytical data' )
if numerical:
ax2.plot(num_result.dedz,num_result.altitude, label= 'numerical data')
ax2.set_title('Analytical Solution vs. Numerical Solution\n Energy Loss-Altitude', fontsize=16)
else:
ax2.set_title('Analytical Solution\n Energy Loss-Altitude', fontsize=16)
ax2.set_xlabel(r'$dEdz, Energy Change per unit Height$', fontsize=14)
ax2.set_ylabel(r'$z, Altitude$', fontsize=14)
ax2.legend(loc='best', fontsize=14)
print('Analytical Solution under the conditions:\ng=0; R_P=infinity; C_L=0; no ablation; no fragmentation')
plt.show()
return v_position,dE_dz_position
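# Example usage (a sketch): with numerical=False only the analytical profiles are
# plotted; when numerical=True, `num_result` is assumed to expose `altitude`,
# `velocity` and `dedz` columns, e.g. the DataFrame returned by
# scipy_solution(...).sci_entry() defined below.
v_analytical, dedz_analytical = plot_analytical(numerical=False)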
class scipy_solution():
def __init__(self,setting,input_data):
self.alldata = {**setting, **input_data}
if self.alldata['radians']:
self.initial_condition = np.array([self.alldata['velocity'],self.alldata['density']*self.alldata['radius']**3*4/3*np.pi,
self.alldata['angle'],self.alldata['init_altitude'],0.,self.alldata['radius']])
else:
self.initial_condition = np.array([self.alldata['velocity'],self.alldata['density']*self.alldata['radius']**3*4/3*np.pi,
self.deg_to_rad(self.alldata['angle']),self.alldata['init_altitude'],0.,self.alldata['radius']])
def deg_to_rad(self,deg):
"""
Returns an angle in radians
for a given angle in degrees
"""
return deg*np.pi/180
def rad_to_deg(self,rad):
"""
Returns an angle in degrees
for a given angle in radians
"""
return rad*180 / np.pi
def simulation(self,t,parameters):
Cd,g,Ch,Q,Cl,Rp,alpha,rhom,rho0,H,Y = (self.alldata['Cd'],self.alldata['g'],self.alldata['Ch'],self.alldata['Q'],self.alldata['Cl'],self.alldata['Rp'],
self.alldata['alpha'],self.alldata['density'],self.alldata['rho0'],self.alldata['H'],self.alldata['strength'])
v,m,theta,z,x,r = parameters
A = np.pi*r**2
rhoa = rho0*np.exp(-z/H)
return np.array([-Cd*rhoa*A*v**2/(2*m)+g*np.sin(theta),
-Ch*rhoa*A*v**3/(2*Q),
g*np.cos(theta)/v-Cl*rhoa*A*v/(2*m)-v*np.cos(theta)/(Rp+z),
-v*np.sin(theta),
v*np.cos(theta)/(1+z/Rp),
(np.sqrt(7/2*alpha*(rhoa/rhom))*v if rhoa*v**2 >= Y else 0)])
def sci_entry(self):
t_0=self.alldata['t_0']
t_end = self.alldata['t_end']
t_step = self.alldata['dt']
tol=self.alldata['tol']
sci_result = solve_ivp(self.simulation, [t_0,t_end],self.initial_condition,
t_eval = np.arange(t_0,t_end,t_step),method='RK45',atol=tol,rtol=tol)
sci_result = pd.DataFrame({'time':sci_result.t, 'velocity':sci_result.y[0],'mass':sci_result.y[1],
'angle':self.rad_to_deg(sci_result.y[2]),'altitude':sci_result.y[3],'distance':sci_result.y[4],'radius':sci_result.y[5]})
Index_label = sci_result.query('velocity < 0').index.tolist()
Index_label += sci_result.query('altitude < 0').index.tolist()
Index_label += sci_result.query('mass < 0').index.tolist()
Index_label += [1000000]
min_index = min(Index_label)
sci_result = sci_result.drop(sci_result.index[min_index:])
if sci_result.altitude.nunique() != len(sci_result.altitude):
sci_result = sci_result.drop_duplicates(subset='altitude', keep="first")
sci_result = sci_result.reset_index()
e = 0.5*sci_result['mass']*sci_result['velocity']**2
dedz = e.diff(2)/sci_result.altitude.diff(2)/(4.184*1e9) #center approximation, with 2 nan, filled with 0 later
dedz = dedz.shift(-1)
dedz = dedz.fillna(0)
sci_result = sci_result.copy()
sci_result.insert(len(sci_result.columns),'dedz', dedz)
sci_result = sci_result.drop(sci_result.index[-1])
return sci_result
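# Example usage (a sketch; the parameter values below are illustrative, not
# validated defaults, but every key is one the class actually reads):
example_setting = {'Cd': 1., 'g': 9.81, 'Ch': 0.1, 'Q': 1e7, 'Cl': 1e-3,
                   'Rp': 6371e3, 'alpha': 0.3, 'rho0': 1.2, 'H': 8000.,
                   't_0': 0., 't_end': 40., 'dt': 0.05, 'tol': 1e-8, 'radians': False}
example_input = {'velocity': 21000., 'density': 3000., 'radius': 10.,
                 'angle': 45., 'init_altitude': 100000., 'strength': 1e5}
earth_result = scipy_solution(example_setting, example_input).sci_entry()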
def plot_single(result,title):
fig = plt.figure(figsize=(15,10))
fig.suptitle(title, fontsize=16)
plt.subplot(331)
plt.plot(result.time,result.velocity)# t-v
plt.title('t-v', fontsize=16)
plt.subplot(332)
plt.plot(result.time,result.mass) # t-m
plt.title('t-m', fontsize=16)
plt.subplot(333)
plt.plot(result.time,result.angle) # t-theta
plt.title('t-angle', fontsize=16)
plt.subplot(334)
plt.plot(result.time,result.altitude) # t-z
plt.title('t-z', fontsize=16)
plt.subplot(335)
plt.plot(result.time,result.distance) # t-x
plt.title('t-x', fontsize=16)
plt.subplot(336)
plt.plot(result.time,result.radius) # t-r
plt.title('t-r', fontsize=16)
plt.subplot(337)
plt.plot(result.altitude,result.velocity) # z-v
plt.title('z-v', fontsize=16)
plt.subplot(338)
plt.plot(result.dedz,result.altitude) # energy-z
plt.title('de-dz', fontsize=16)
plt.subplot(339)
plt.plot(result.distance,result.altitude) # x-z
plt.title('x-z', fontsize=16)
plt.show()
def plot_contrast(result1,result2, title = 'Scipy solution vs. Solver'):
fig = plt.figure(figsize=(15,10))
fig.suptitle(title, fontsize=16)
plt.subplot(331)
plt.plot(result1.time,result1.velocity,'r',label = 'solver')# t-v
plt.plot(result2.time,result2.velocity,'b',label = 'scipy')
plt.legend()
plt.title('t-v', fontsize=16)
plt.subplot(332)
plt.plot(result1.time,result1.mass,'r',label = 'solver')
plt.plot(result2.time,result2.mass,'b',label = 'scipy')# t-m
plt.legend()
plt.title('t-m', fontsize=16)
plt.subplot(333)
plt.plot(result1.time,result1.angle,'r',label = 'solver') # t-theta
plt.plot(result2.time,result2.angle,'b',label = 'scipy')
plt.legend()
plt.title('t-angle', fontsize=16)
plt.subplot(334)
plt.plot(result1.time,result1.altitude,'r',label = 'solver') # t-z
plt.plot(result2.time,result2.altitude,'b',label = 'scipy')
plt.legend()
plt.title('t-z', fontsize=16)
plt.subplot(335)
plt.plot(result1.time,result1.distance,'r',label = 'solver') # t-x
plt.plot(result2.time,result2.distance,'b',label = 'scipy')
plt.legend()
plt.title('t-x', fontsize=16)
plt.subplot(336)
plt.plot(result1.time,result1.radius,'r',label = 'solver') # t-r
plt.plot(result2.time,result2.radius,'b',label = 'scipy')
plt.legend()
plt.title('t-r', fontsize=16)
plt.subplot(337)
plt.plot(result1.altitude,result1.velocity,'r',label = 'solver') # z-v
plt.plot(result2.altitude,result2.velocity,'b',label = 'scipy')
plt.legend()
plt.title('z-v', fontsize=16)
plt.subplot(338)
plt.plot(result1.dedz,result1.altitude,'r',label = 'solver') # energy-z
plt.plot(result2.dedz,result2.altitude,'b',label = 'scipy')
plt.legend()
plt.title('dedz-z', fontsize=16)
plt.subplot(339)
plt.plot(result1.distance,result1.altitude,'r',label = 'solver') # x-z
plt.plot(result2.distance,result2.altitude,'b',label = 'scipy')
plt.legend()
plt.title('x-z', fontsize=16)
plt.show()
class scipy_solution_mars():
def __init__(self,setting,input_data):
self.alldata = {**setting, **input_data}
if self.alldata['radians']:
self.initial_condition = np.array([self.alldata['velocity'],self.alldata['density']*self.alldata['radius']**3*4/3*np.pi,
self.alldata['angle'],self.alldata['init_altitude'],0.,self.alldata['radius']])
else:
self.initial_condition = np.array([self.alldata['velocity'],self.alldata['density']*self.alldata['radius']**3*4/3*np.pi,
self.deg_to_rad(self.alldata['angle']),self.alldata['init_altitude'],0.,self.alldata['radius']])
def deg_to_rad(self,deg):
"""
Returns an angle in radians
for a given angle in degrees
"""
return deg*np.pi/180
def rad_to_deg(self,rad):
"""
Returns an angle in degrees
for a given angle in radians
"""
return rad*180 / np.pi
def simulation(self,t,parameters):
Cd,g,Ch,Q,Cl,Rp,alpha,rhom,rho0,H,Y = (self.alldata['Cd'],self.alldata['g'],self.alldata['Ch'],self.alldata['Q'],self.alldata['Cl'],self.alldata['Rp'],
self.alldata['alpha'],self.alldata['density'],self.alldata['rho0'],self.alldata['H'],self.alldata['strength'])
v,m,theta,z,x,r = parameters
A = np.pi*r**2
rhoa = 0.699*np.exp(-0.00009*z)/(0.1921*(249.7-0.00222*z)) if z >= 7000 else 0.699*np.exp(-0.00009*z)/(0.1921*(242.1-0.000998*z))
return np.array([-Cd*rhoa*A*v**2/(2*m)+g*np.sin(theta),
-Ch*rhoa*A*v**3/(2*Q),
g*np.cos(theta)/v-Cl*rhoa*A*v/(2*m)-v*np.cos(theta)/(Rp+z),
-v*np.sin(theta),
v*np.cos(theta)/(1+z/Rp),
(np.sqrt(7/2*alpha*(rhoa/rhom))*v if rhoa*v**2 >= Y else 0)])
def sci_entry(self):
t_0=self.alldata['t_0']
t_end = self.alldata['t_end']
t_step = self.alldata['dt']
tol=self.alldata['tol']
sci_result = solve_ivp(self.simulation, [t_0,t_end],self.initial_condition,
t_eval = np.arange(t_0,t_end,t_step),method='RK45',atol=tol,rtol=tol)
from __future__ import absolute_import
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
import scipy.stats
class Link(object):
"""
A generic link function for one-parameter exponential
family, with call, inverse and deriv methods.
"""
def initialize(self, Y):
return np.asarray(Y).mean() * np.ones(Y.shape)
def __call__(self, p):
raise NotImplementedError
def inverse(self, z):
raise NotImplementedError
def deriv(self, p):
raise NotImplementedError
class Logit(Link):
"""
The logit transform as a link function:
g(x) = log(x / (1 - x))
g'(x) = 1 / (x * (1 - x))
g^(-1)(x) = exp(x)/(1 + exp(x))
"""
tol = 1.0e-10
def clean(self, p):
"""
Clip logistic values to range (tol, 1-tol)
INPUTS:
p -- probabilities
OUTPUTS: pclip
pclip -- clipped probabilities
"""
return np.clip(p, Logit.tol, 1. - Logit.tol)
def __call__(self, p):
"""
Logit transform
g(p) = log(p / (1 - p))
INPUTS:
p -- probabilities
OUTPUTS: z
z -- logit transform of p
"""
p = self.clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse logit transform
h(z) = exp(z)/(1+exp(z))
INPUTS:
z -- logit transform of p
OUTPUTS: p
p -- probabilities
"""
t = np.exp(z)
return t / (1. + t)
def deriv(self, p):
"""
Derivative of logit transform
g(p) = 1 / (p * (1 - p))
INPUTS:
p -- probabilities
OUTPUTS: y
y -- derivative of logit transform of p
"""
p = self.clean(p)
return 1. / (p * (1 - p))
logit = Logit()
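# Quick illustration (example values, not part of the module): the module-level
# `logit` instance maps probabilities to the real line and `inverse` undoes it.
_p = np.array([0.1, 0.5, 0.9])
assert np.allclose(logit.inverse(logit(_p)), _p)
assert np.allclose(logit.deriv(np.array([0.5])), 4.0)  # 1 / (0.5 * 0.5)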
class Power(Link):
"""
The power transform as a link function:
g(x) = x**power
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, x):
"""
Power transform
g(x) = x**self.power
INPUTS:
x -- mean parameters
OUTPUTS: z
z -- power transform of x
"""
return np.power(x, self.power)
def inverse(self, z):
"""
Inverse of power transform
g(x) = x**(1/self.power)
INPUTS:
z -- linear predictors in GLM
OUTPUTS: x
x -- mean parameters
"""
return np.power(z, 1. / self.power)
def deriv(self, x):
"""
Derivative of power transform
g(x) = self.power * x**(self.power - 1)
INPUTS:
x -- mean parameters
OUTPUTS: z
z -- derivative of power transform of x
"""
return self.power * np.power(x, self.power - 1)
inverse = Power(power=-1.)
inverse.__doc__ = """
The inverse transform as a link function:
g(x) = 1 / x
"""
sqrt = Power(power=0.5)
sqrt.__doc__ = """
The square-root transform as a link function:
g(x) = sqrt(x)
"""
inverse_squared = Power(power=-2.)
inverse_squared.__doc__ = """
The inverse squared transform as a link function:
g(x) = 1 / x**2
"""
identity = Power(power=1.)
identity.__doc__ = """
The identity transform as a link function:
g(x) = x
"""
class Log(Link):
"""
The log transform as a link function:
g(x) = log(x)
"""
tol = 1.0e-10
def clean(self, x):
return np.clip(x, Logit.tol, np.inf)
def __call__(self, x, **extra):
"""
Log transform
g(x) = log(x)
INPUTS:
x -- mean parameters
OUTPUTS: z
z -- log(x)
"""
x = self.clean(x)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform
g(x) = exp(x)
INPUTS:
z -- linear predictors in GLM
OUTPUTS: x
x -- exp(z)
"""
return np.exp(z)
def deriv(self, x):
"""
Derivative of log transform
g(x) = 1/x
INPUTS:
x -- mean parameters
OUTPUTS: z
z -- derivative of log transform of x
"""
x = self.clean(x)
return 1. / x
log = Log()
class CDFLink(Logit):
"""
Use the CDF of a scipy.stats distribution as a link function:
g(x) = dbn.ppf(x)
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link
g(p) = self.dbn.ppf(p)
INPUTS:
p -- mean parameters
OUTPUTS: z
z -- quantile (inverse CDF) transform of p
"""
p = self.clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
Inverse of CDF link
g(z) = self.dbn.cdf(z)
INPUTS:
z -- linear predictors in GLM
OUTPUTS: p
p -- inverse of CDF link of z
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
g(p) = 1/self.dbn.pdf(self.dbn.ppf(p))
INPUTS:
x -- mean parameters
OUTPUTS: z
z -- derivative of CDF transform of x
"""
p = self.clean(p)
return 1. / self.dbn.pdf(self(p))
probit = CDFLink()
probit.__doc__ = """
The probit (standard normal CDF) transform as a link function:
g(x) = scipy.stats.norm.ppf(x)
"""
cauchy = CDFLink(dbn=scipy.stats.cauchy)
cauchy.__doc__ = """
The Cauchy (standard Cauchy CDF) transform as a link function:
g(x) = scipy.stats.cauchy.ppf(x)
"""
class CLogLog(Logit):
"""
The complementary log-log transform as a link function:
g(x) = log(-log(x))
"""
def __call__(self, p):
"""
C-Log-Log transform
g(p) = log(-log(p))
INPUTS:
p -- mean parameters
OUTPUTS: z
z -- log(-log(p))
"""
p = self.clean(p)
return np.log(-np.log(p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform
g(z) = exp(-exp(z))
INPUTS:
z -- linear predictor scale
OUTPUTS: p
p -- mean parameters
"""
return np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform
g(p) = - 1 / (log(p) * p)
INPUTS:
p -- mean parameters
OUTPUTS: z
z -- - 1 / (log(p) * p)
"""
p = self.clean(p)
return -1. / (np.log(p) * p)
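# Quick illustration (example values only; the instance below is created just for
# this check): the complementary log-log link round-trips probabilities.
cloglog = CLogLog()
assert np.allclose(cloglog.inverse(cloglog(np.array([0.2, 0.8]))), [0.2, 0.8])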
##########################################
# FUNCTIONS FOR SEARCHLIGHT RSA ANALYSES #
##########################################
# This code implements RSA within a moveable searchlight by adapting the nilearn searchlight class.
# This is extensively optimised using Numba and certain elements can be run in parallel using joblib.
# This implementation is NOT designed to be flexible however, for example it only implements Spearman
# correlation as a measure of similarity.
from numba import njit
import numpy as np
from nilearn._utils.niimg_conversions import check_niimg_4d, check_niimg_3d
from sklearn import neighbors
from nilearn.image.resampling import coord_transform
import joblib
from nilearn import image
import warnings
from joblib import Parallel, delayed, cpu_count
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from nilearn import masking
np.seterr(divide='ignore', invalid='ignore')
@njit
def get_tri(arr):
tri_idx = np.triu_indices_from(arr, k=1)
out = np.zeros(len(tri_idx[0]), arr.dtype)
for n, (i,j) in enumerate(zip(tri_idx[0], tri_idx[1])):
out[n] = arr[i,j]
return out
@njit
def scale_data(X):
return (X - np.nanmean(X)) / (np.nanstd(X) + 1e-20)
@njit
def ols(x, y, y_mask):
for i in range(y.shape[1]):
y_mask[np.isnan(y[:, i])] = True
y_mask[np.isnan(x[:, 0])] = True
x = x[~y_mask]
y = y[~y_mask]
coefs = np.dot(np.linalg.pinv(np.dot(x.T,x)),np.dot(x.T,y))
return coefs
@njit
def rankdata(a):
arr = np.ravel(np.asarray(a))
sorter = np.argsort(arr, kind='quicksort')
inv = np.empty(sorter.size, dtype=np.int16)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
arr = arr[sorter]
obs = np.hstack((np.array([True]), arr[1:] != arr[:-1]))
dense = obs.cumsum()[inv]
# cumulative counts of each unique value
count = np.hstack((np.nonzero(obs)[0], np.array([len(obs)])))
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
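# Illustration (assumes the numba kernel above compiles on this setup): rankdata
# reproduces scipy.stats.rankdata's default 'average' handling of ties, so
# ranking each row before pearson_corr yields a Spearman correlation.
from scipy.stats import rankdata as _scipy_rankdata  # imported only for this check
_a = np.array([3., 1., 4., 1., 5.])
assert np.allclose(rankdata(_a), _scipy_rankdata(_a))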
@njit
def pearson_corr(data1, data2):
M = data1.size
sum1 = 0.
sum2 = 0.
for i in range(M):
sum1 += data1[i]
sum2 += data2[i]
mean1 = sum1 / M
mean2 = sum2 / M
var_sum1 = 0.
var_sum2 = 0.
cross_sum = 0.
for i in range(M):
var_sum1 += (data1[i] - mean1) ** 2
var_sum2 += (data2[i] - mean2) ** 2
cross_sum += (data1[i] * data2[i])
std1 = (var_sum1 / M) ** .5
std2 = (var_sum2 / M) ** .5
cross_mean = cross_sum / M
std1 = std1 + 1e-8
std2 = std2 + 1e-8
out = (cross_mean - mean1 * mean2) / (std1 * std2)
return out
@njit
def pairwise_corr(X):
n, m = X.shape
out = np.zeros((n, n))
ranks = np.zeros((n, m))
for i in range(n):
ranks[i, :] = rankdata(X[i, :])
i_idx, j_idx = np.tril_indices_from(out)
idx = zip(i_idx, j_idx)
# ASSUMES SYMMETRY
for i, j in idx:
corr = pearson_corr(ranks[i, :], ranks[j, :])
out[i, j] = corr
out[j, i] = corr
return out
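# Illustration (again assuming the numba kernels compile): rows related by a
# monotone transform get a pairwise (Spearman) correlation of ~1.
_X = np.vstack((np.arange(10.), np.arange(10.) ** 2))
assert np.isclose(pairwise_corr(_X)[0, 1], 1.0)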
@njit
def numba_rsa_iterator(X, y, y_mask, list_rows):
par_scores = np.ones((len(list_rows), y.shape[1])) * -999
for i in range(len(list_rows)):
row = list_rows[i]
# Get RDM/RSM
new_x = np.zeros((X.shape[0], len(row)))
for n, r in enumerate(row):
new_x[:, n] = X[:, r] + 1e-8 # Avoid division by zero
pwd = pairwise_corr(new_x)
sim_data = get_tri(pwd)
sim_data[np.where(y_mask)] = np.nan
sim_data = scale_data(sim_data)
sim_data_reshaped = np.zeros((sim_data.shape[0], 1))
sim_data_reshaped[:, 0] = sim_data
if np.any(sim_data_reshaped != 0):
coefs = ols(sim_data_reshaped, y, y_mask)
par_scores[np.array(i), :] = coefs
return par_scores
@njit
def coord_transform_numba(x, y, z, affine):
shape = np.asarray(x).shape
coords = np.vstack((np.atleast_1d(np.array([x])),
import numpy as np
import SimPEG as simpeg
# campo primário para octree <<<<<<<<<(mod. Diego em 14 dez 2019)
def homo1DModelSource(mesh, freq, sigma_1d):
"""
Function that calculates and return background fields
:param Simpeg mesh object mesh: Holds information on the discretization
:param float freq: The frequency to solve at
:param np.array sigma_1d: Background model of conductivity to base the calculations on, 1d model.
:rtype: numpy.ndarray (mesh.nE, 2)
:return: eBG_bp, E fields for the background model at both polarizations.
"""
from . import get1DEfields
# from . import getEHfields # <<<<<<<<<(mod. Diego em 17 dez 2019)
# Get a 1d solution for a halfspace background
if mesh.dim == 1:
mesh1d = mesh
elif mesh.dim == 2:
mesh1d = simpeg.Mesh.TensorMesh([mesh.hy], np.array([mesh.x0[1]]))
elif mesh.dim == 3:
mesh1d = simpeg.Mesh.TensorMesh([mesh.hz], np.array([mesh.x0[2]]))
# # Note: Everything is using e^iwt
e0_1d = get1DEfields(mesh1d, sigma_1d, freq) #, mesh.vectorNz)
# Ed, Eu, Hd, Hu = getEHfields(mesh1d,sigma_1d,freq,mesh1d.vectorNx) # <<<<<<<<<(mod. Diego em 17 dez 2019)
# e0_1d = Ed + Eu
# sourceAmp = 1.0
# if sourceAmp is not None:
# e0_1d = ((e0_1d/e0_1d[-1])*sourceAmp)
E1dFieldDict = dict(zip(mesh.vectorNz, e0_1d))
if mesh.dim == 1:
eBG_px = simpeg.mkvc(e0_1d, 2)
eBG_py = -simpeg.mkvc(e0_1d, 2) # added a minus to make the results in the correct quadrants.
elif mesh.dim == 2:
ex_px = np.zeros(mesh.vnEx, dtype=complex)
ey_px = np.zeros((mesh.nEy, 1), dtype=complex)
for i in np.arange(mesh.vnEx[0]):
ex_px[i, :] = -e0_1d
eBG_px = np.vstack((simpeg.Utils.mkvc(ex_px, 2), ey_px))
# Setup y (north) polarization (_py)
ex_py = np.zeros((mesh.nEx, 1), dtype='complex128')
ey_py = np.zeros(mesh.vnEy, dtype='complex128')
# Assign the source to ey_py
for i in np.arange(mesh.vnEy[0]):
ey_py[i, :] = e0_1d
# ey_py[1:-1, 1:-1, 1:-1] = 0
eBG_py = np.vstack((ex_py, simpeg.Utils.mkvc(ey_py, 2)))
elif mesh.dim == 3:
# Setup x (east) polarization (_x)
ex_px = -np.array([E1dFieldDict[i] for i in mesh.gridEx[:, 2]]).reshape(-1, 1)
ey_px = np.zeros((mesh.nEy, 1), dtype=complex)
ez_px = np.zeros((mesh.nEz, 1), dtype=complex)
# Construct the full fields
eBG_px = np.vstack((ex_px, ey_px, ez_px))
# Setup y (north) polarization (_py)
ex_py = np.zeros((mesh.nEx, 1), dtype=complex)
ey_py = np.array([E1dFieldDict[i] for i in mesh.gridEy[:, 2]]).reshape(-1, 1)
ez_py = np.zeros((mesh.nEz, 1), dtype=complex)
# Construct the full fields
eBG_py = np.vstack((ex_py,ey_py,ez_py))
# Return the electric fields
eBG_bp = np.hstack((eBG_px, eBG_py))
return eBG_bp
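def _example_background_fields():
    """Usage sketch (not called anywhere): the mesh spacing, frequency and
    half-space conductivity below are illustrative values only."""
    h = np.ones(8) * 50.
    mesh = simpeg.Mesh.TensorMesh([h, h, h])
    sigma_1d = np.ones(8) * 1e-2  # one value per vertical cell of the mesh
    return homo1DModelSource(mesh, freq=1.0, sigma_1d=sigma_1d)  # shape (mesh.nE, 2)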
#def homo1DModelSource(mesh, freq, sigma_1d):
# """
# Function that calculates and return background fields
#
# :param Simpeg mesh object mesh: Holds information on the discretization
# :param float freq: The frequency to solve at
# :param np.array sigma_1d: Background model of conductivity to base the calculations on, 1d model.
# :rtype: numpy.ndarray (mesh.nE, 2)
# :return: eBG_bp, E fields for the background model at both polarizations.
#
# """
# from . import get1DEfields
# # Get a 1d solution for a halfspace background
# if mesh.dim == 1:
# mesh1d = mesh
# elif mesh.dim == 2:
# mesh1d = simpeg.Mesh.TensorMesh([mesh.hy], np.array([mesh.x0[1]]))
# elif mesh.dim == 3:
# mesh1d = simpeg.Mesh.TensorMesh([mesh.hz], np.array([mesh.x0[2]]))
#
# # # Note: Everything is using e^iwt
# e0_1d = get1DEfields(mesh1d, sigma_1d, freq)
# if mesh.dim == 1:
# eBG_px = simpeg.mkvc(e0_1d, 2)
# eBG_py = -simpeg.mkvc(e0_1d, 2) # added a minus to make the results in the correct quadrents.
# elif mesh.dim == 2:
# ex_px = np.zeros(mesh.vnEx, dtype=complex)
# ey_px = np.zeros((mesh.nEy, 1), dtype=complex)
# for i in np.arange(mesh.vnEx[0]):
# ex_px[i, :] = -e0_1d
# eBG_px = np.vstack((simpeg.Utils.mkvc(ex_px, 2), ey_px))
# # Setup y (north) polarization (_py)
# ex_py = np.zeros((mesh.nEx, 1), dtype='complex128')
# ey_py = np.zeros(mesh.vnEy, dtype='complex128')
# # Assign the source to ey_py
# for i in np.arange(mesh.vnEy[0]):
# ey_py[i, :] = e0_1d
# # ey_py[1:-1, 1:-1, 1:-1] = 0
# eBG_py = np.vstack((ex_py, simpeg.Utils.mkvc(ey_py, 2), ez_py))
# elif mesh.dim == 3:
# # Setup x (east) polarization (_x)
# ex_px = np.zeros(mesh.vnEx, dtype=complex)
# ey_px = np.zeros((mesh.nEy, 1), dtype=complex)
# ez_px = np.zeros((mesh.nEz, 1), dtype=complex)
# # Assign the source to ex_x
# for i in np.arange(mesh.vnEx[0]):
# for j in np.arange(mesh.vnEx[1]):
# ex_px[i, j, :] = -e0_1d
# eBG_px = np.vstack((simpeg.Utils.mkvc(ex_px, 2), ey_px, ez_px))
# # Setup y (north) polarization (_py)
# ex_py = np.zeros((mesh.nEx, 1), dtype='complex128')
# ey_py = np.zeros(mesh.vnEy, dtype='complex128')
# ez_py = np.zeros((mesh.nEz, 1), dtype='complex128')
# # Assign the source to ey_py
# for i in np.arange(mesh.vnEy[0]):
# for j in np.arange(mesh.vnEy[1]):
# ey_py[i, j, :] = e0_1d
# # ey_py[1:-1, 1:-1, 1:-1] = 0
# eBG_py = np.vstack((ex_py, simpeg.Utils.mkvc(ey_py, 2), ez_py))
#
# # Return the electric fields
# eBG_bp = np.hstack((eBG_px, eBG_py))
# return eBG_bp
def analytic1DModelSource(mesh, freq, sigma_1d):
"""
Function that calculates and return background fields
:param Simpeg mesh object mesh: Holds information on the discretization
:param float freq: The frequency to solve at
:param np.array sigma_1d: Background model of conductivity to base the calculations on, 1d model.
:rtype: numpy.ndarray (mesh.nE, 2)
:return: eBG_bp, E fields for the background model at both polarizations.
"""
from . import getEHfields
# Get a 1d solution for a halfspace background
if mesh.dim == 1:
mesh1d = mesh
elif mesh.dim == 2:
mesh1d = simpeg.Mesh.TensorMesh([mesh.hy], np.array([mesh.x0[1]]))
elif mesh.dim == 3:
mesh1d = simpeg.Mesh.TensorMesh([mesh.hz], np.array([mesh.x0[2]]))
# # Note: Everything is using e^iwt
Eu, Ed, _, _ = getEHfields(mesh1d, sigma_1d, freq, mesh.vectorNz)
# Make the fields into a dictionary of location and the fields
e0_1d = Eu+Ed
E1dFieldDict = dict(zip(mesh.vectorNz, e0_1d))
if mesh.dim == 1:
eBG_px = simpeg.mkvc(e0_1d, 2)
eBG_py = -simpeg.mkvc(e0_1d, 2) # added a minus to make the results in the correct quadrants.
elif mesh.dim == 2:
ex_px = np.zeros(mesh.vnEx, dtype=complex)
ey_px = np.zeros((mesh.nEy, 1), dtype=complex)
for i in np.arange(mesh.vnEx[0]):
ex_px[i, :] = -e0_1d
eBG_px = np.vstack((simpeg.Utils.mkvc(ex_px, 2), ey_px))
# Setup y (north) polarization (_py)
ex_py = np.zeros((mesh.nEx, 1), dtype='complex128')
import pyinduct as pi
import numpy as np
import sympy as sp
import time
import os
import pyqtgraph as pg
import matplotlib.pyplot as plt
from pyinduct.visualization import PgDataPlot, get_colors
# matplotlib configuration
plt.rcParams.update({'text.usetex': True})
def pprint(expression="\n\n\n"):
if isinstance(expression, np.ndarray):
expression = sp.Matrix(expression)
sp.pprint(expression, num_columns=180)
def get_primal_eigenvector(according_paper=False):
if according_paper:
# some condensed parameters
alpha = beta = sym.c / 2
tau0 = 1 / sp.sqrt(sym.a * sym.b)
w = tau0 * sp.sqrt((sym.lam + alpha) ** 2 - beta ** 2)
# matrix exponential
expm_A = sp.Matrix([
[sp.cosh(w * sym.z),
(sym.lam + sym.c) / sym.b / w * sp.sinh(w * sym.z)],
[sym.lam / sym.a / w * sp.sinh(w * sym.z),
sp.cosh(w * sym.z)]
])
else:
# matrix
A = sp.Matrix([[sp.Float(0), (sym.lam + sym.c) / sym.b],
[sym.lam/sym.a, sp.Float(0)]])
# matrix exponential
expm_A = sp.exp(A * sym.z)
# inital values at z=0 (scaled by xi(s))
phi0 = sp.Matrix([[sp.Float(1)], [sym.lam / sym.d]])
# solution
phi = expm_A * phi0
return phi
def plot_eigenvalues(eigenvalues, return_figure=False):
plt.figure(facecolor="white")
plt.scatter(np.real(eigenvalues), np.imag(eigenvalues))
ax = plt.gca()
ax.set_xlabel(r"$Re(\lambda)$")
ax.set_ylabel(r"$Im(\lambda)$")
if return_figure:
return ax.get_figure()
else:
plt.show()
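# Quick illustration (synthetic values, not eigenvalues of the model): conjugate
# pairs appear mirrored about the real axis.
_fig = plot_eigenvalues(np.array([-1 + 2j, -1 - 2j, -0.5 + 0j]), return_figure=True)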
def check_eigenvalues(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl, ceq, ss):
# check eigenvalues of the approximation
A_sys = (-ceq[0].dynamic_forms[sys_fem_lbl].e_n_pb_inv @
ceq[0].dynamic_forms[sys_fem_lbl].matrices["E"][0][1])
A_obs = (-ceq[1].dynamic_forms[obs_fem_lbl].e_n_pb_inv @
ceq[1].dynamic_forms[obs_fem_lbl].matrices["E"][0][1])
A_modal_obs = (-ceq[2].dynamic_forms[obs_modal_lbl].e_n_pb_inv @
ceq[2].dynamic_forms[obs_modal_lbl].matrices["E"][0][1])
pprint()
pprint("Eigenvalues [{}, {}, {}]".format(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl))
pprint([np.linalg.eigvals(A_) for A_ in (A_sys, A_obs, A_modal_obs)])
def find_eigenvalues(n):
def characteristic_equation(om):
return om * (np.sin(om) + param.m * om * np.cos(om))
eig_om = pi.find_roots(
characteristic_equation, np.linspace(0, np.pi * n, 5 * n), n)
eig_vals = list(sum([(1j * ev, -1j * ev) for ev in eig_om], ()))
return eig_om, sort_eigenvalues(eig_vals)
def sort_eigenvalues(eigenvalues):
imag_ev = list()
real_ev = list()
for ev in eigenvalues:
if np.isclose(np.imag(ev), 0):
"""Mathematical functions operating on arrays"""
import numpy as np
def normalize_vec_l2(vecs):
"""Perform l2 normalization on each vector in a given matrix (axis 1)"""
norm = np.linalg.norm(vecs, ord=2, axis=1, keepdims=True) + 1e-6
return vecs / norm
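# Minimal check (illustrative values): every row of the output has (near-)unit
# L2 norm; the +1e-6 in the denominator keeps all-zero rows finite.
_v = np.array([[3., 4.], [0., 5.]])
assert np.allclose(np.linalg.norm(normalize_vec_l2(_v), axis=1), 1., atol=1e-3)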
def asmk_kernel(sim, image_ids, *, alpha, similarity_threshold):
"""Compute scores for visual words"""
mask = (sim>=similarity_threshold)
sim = np.power(sim[mask], alpha)
""" Tools for datasets, data augmentation, and general data-handling.
Include easy access to a number of datasets (MNIST and associated, CIFAR10/CIFAR100, ImageNette/ImageWoof, ImageNet).
"""
import numpy as np
import os
import torch
import torch.cuda as cuda
import torch.utils.data as data_utils
import torchvision.datasets as datasets
import torch.distributed as distributed
import torchvision.transforms as transforms
import mltools.logging as log
LOGTAG = "DATA"
# Implemented datasets name and their underlying data types.
# Datasets with same data types can use networks with similar architectures and the same preprocessing functions.
DATASETS = {'MNIST': 'MNIST', 'FashionMNIST': 'MNIST', 'CIFAR-10': 'CIFAR', 'CIFAR-100': 'CIFAR',
'ImageNette': 'ImageNet', 'ImageWoof': 'ImageNet', 'ImageNet': 'ImageNet'}
FOLDER_NAME = {'MNIST': 'MNIST', 'FashionMNIST': 'FashionMNIST', 'CIFAR-10': 'CIFAR10', 'CIFAR-100': 'CIFAR100',
'ImageNette': 'imagenette', 'ImageWoof': 'imagewoof', 'ImageNet': 'ImageNet'}
# Pre-calculated normalization values [mean, std] for pre-processing.
NORMALIZATION = {'MNIST': ((0.1307,), (0.3081,)), 'CIFAR': ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
'ImageNet': ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])}
# Default transforms ensure we get normalized data as tensors and not as PIL images.
default_mnist_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(*NORMALIZATION['MNIST'])])
default_cifar_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(*NORMALIZATION['CIFAR'])])
default_imagenet_transform = transforms.Compose([transforms.CenterCrop(256), transforms.ToTensor(),
transforms.Normalize(*NORMALIZATION['ImageNet'])])
default_transforms = {'MNIST': default_mnist_transform, 'CIFAR': default_cifar_transform,
'ImageNet': default_imagenet_transform}
# Fast default transforms made to be used in combination with FastLoader.
# Contains only transforms to be applied on the PIL image.
fast_default_imagenet_transform = transforms.CenterCrop(256)
fast_default_transforms = {'MNIST': None, 'CIFAR': None, 'ImageNet': fast_default_imagenet_transform}
class FastLoader:
"""Wraps a PyTorch :class:`torch.utils.data.DataLoader` and provides faster data loading to GPU. Also performs
normalization at run time.
Iterate over the :class:`torch.utils.data.DataLoader` to receive the (input, output) batches from the data loader.
:param data_loader: data loader from which to fetch data.
:type data_loader: torch.utils.data.DataLoader
:param data_mean: per RGB channel mean of the data to use for normalization.
:type data_mean: list[float, float, float]
:param data_std: per RGB channel standard deviation of the data to use for normalization.
:type data_std: list[float, float, float]
"""
def __init__(self, data_loader, device, data_mean, data_std):
self.data_loader = data_loader
self.device = device
self.data_mean, self.data_std = self._preprocess(data_mean), self._preprocess(data_std)
def _preprocess(self, data):
"""Utility internal function to transform normalization data to :class:`torch.Tensor` and move them to GPU.
:param data: RGB list of values to convert.
:type data: list[float, float, float]
:rtype: torch.Tensor
"""
return torch.tensor([d for d in data]).to(self.device).view(1, 3, 1, 1)
def __iter__(self):
"""Yield a batch of (input, output) from the data loader, with the inputs normalized.
:return: batch of (input, output).
:rtype: (torch.Tensor, torch.Tensor)
"""
stream = cuda.Stream(self.device)
first_entry = True
for next_input, next_target in self.data_loader:
with cuda.stream(stream):
# Pre-load a batch of input and targets to the GPU, and normalize the input:
next_input = next_input.to(self.device, non_blocking=True)
next_target = next_target.to(self.device, non_blocking=True)
next_input = next_input.float()
next_input = next_input.sub_(self.data_mean).div_(self.data_std)
if not first_entry:
yield input, target # Yield the pre-loaded batch of input and targets.
else:
# On the first entry, we have to do the pre-loading step twice (as nothing as been pre-loaded before!)
first_entry = False
cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __len__(self):
return len(self.data_loader)
def get_loaders(dataset, batch_size, test, data_path=None, train_transform=None, validation_transform=None,
train_percentage=0.85):
"""Return PyTorch :class:`torch.utils.data.DataLoader` for training and validation, outfitted with a random sampler.
This doesn't support multiple workers and distributed training.
For performance, use :func:get_fast_loaders` instead.
If set to run on the test set, :param:`train_percentage` will be ignored and set to 1.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param batch_size: batch size for training and validation.
:type batch_size: int
:param test: run validation on the test set.
:type test: bool
:param data_path: path to folder containing dataset.
:type data_path: str
:param train_transform: PyTorch transform to apply to images for training.
:type train_transform: torchvision.transforms.Compose
:param validation_transform: PyTorch transform to apply to images for validation.
:type validation_transform: torchvision.transforms.Compose
:param train_percentage: percentage of the data in the training set.
:type train_percentage: float
:return: training and validation data loaders.
:rtype: (torch.utils.data.DataLoader, torch.utils.data.DataLoader)
"""
# Check if any parameters has been set to its default value, and if so, setup the defaults.
data_path, train_transform, validation_transform = _setup_defaults(dataset, data_path, train_transform,
validation_transform, False)
# Get all of the training data available.
train_data = _get_train_data(dataset, data_path, train_transform)
if test:
# If in test mode, fetch the test data and prepare the validation loader with it:
test_data = _get_test_data(dataset, data_path, validation_transform)
train_sampler = data_utils.RandomSampler(train_data) # Sample over the entire training set.
# We always use pinned memory as data is loaded on the CPU and then pushed to the GPU. This fastens transfers.
validation_loader = data_utils.DataLoader(test_data, batch_size=batch_size, pin_memory=True)
else:
# Otherwise, perform a train/validation split on the training data available:
dataset_size = len(train_data)
split_index = int(dataset_size * train_percentage)
indices = list(range(dataset_size))
np.random.shuffle(indices) # Make sure we don't always get the same train/validation split.
train_indices, validation_indices = indices[:split_index], indices[split_index:]
train_sampler = data_utils.SubsetRandomSampler(train_indices) # Sample over only a subset of the training set.
validation_sampler = data_utils.SubsetRandomSampler(validation_indices)
validation_loader = data_utils.DataLoader(train_data, sampler=validation_sampler, batch_size=batch_size,
pin_memory=True)
train_loader = data_utils.DataLoader(train_data, batch_size=batch_size, pin_memory=True, sampler=train_sampler,
drop_last=True)
return train_loader, validation_loader
def get_fast_loaders(dataset, batch_size, test, device, data_path=None, train_transform=None, validation_transform=None,
train_percentage=0.85, workers=4):
"""Return :class:`FastLoader` for training and validation, outfitted with a random sampler.
If set to run on the test set, :param:`train_percentage` will be ignored and set to 1.
The transforms should only include operations on PIL images and should not convert the images to a tensor, nor
handle normalization of the tensors. This is handled at runtime by the fast loaders.
If you are not looking for high-performance, prefer :func:`get_loaders`.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param batch_size: batch size for training and validation.
:type batch_size: int
:param test: run validation on the test set.
:type test: bool
:param data_path: path to folder containing dataset.
:type data_path: str
:param train_transform: PyTorch transform to apply to images for training.
:type train_transform: torchvision.transforms.Compose
:param validation_transform: PyTorch transform to apply to images for validation.
:type validation_transform: torchvision.transforms.Compose
:param train_percentage: percentage of the data in the training set.
:type train_percentage: float
:param workers: number of subprocesses to use for data loading. Use 0 for loading in the main process.
:type workers: int
:return: training and validation fast data loaders.
:rtype: (FastLoader, FastLoader)
"""
# Check if any parameters has been set to its default value, and if so, setup the defaults.
data_path, train_transform, validation_transform = _setup_defaults(dataset, data_path, train_transform,
validation_transform, fast=True)
# Get all of the training data available.
train_data = _get_train_data(dataset, data_path, train_transform)
log.log("Training data succesfully fetched!", LOGTAG, log.Level.DEBUG)
if not test:
# Perform a train/validation split on the training data available:
# For performance reasons, the train/validation split will always be the same.
# TODO: Implement random train/validation split with fast loading and distributed training.
log.log("Running in standard training/validation mode.", LOGTAG, log.Level.INFO)
dataset_size = len(train_data)
split_index = int(dataset_size * train_percentage)
log.log("{0}:{1}".format(dataset_size, split_index), LOGTAG, log.Level.HIGHLIGHT)
validation_data = train_data[split_index:]
train_data = train_data[:split_index]
log.log("Validation data succesfully fetched!", LOGTAG, log.Level.DEBUG)
else:
# Fetch the test data:
log.log("Running in <b>test</b> mode. All training data available will be used, and "
"validation will be done on the test set. Are you really ready to publish?", LOGTAG, log.Level.WARNING)
validation_data = _get_test_data(dataset, data_path, validation_transform)
log.log("Test data succesfully fetched!", LOGTAG, log.Level.DEBUG)
if distributed.is_initialized():
# If running in distributed mode, use a DistributedSampler:
log.log("Running in <b>distributed</b> mode. This hasn't been thoroughly tested, beware!",
LOGTAG, log.Level.WARNING)
train_sampler = data_utils.distributed.DistributedSampler(train_data)
else:
# Otherwise, default to a RandomSampler:
train_sampler = data_utils.RandomSampler(train_data)
# Build the train and validation loaders, using pinned memory and a custom collate function to build the batches.
train_loader = data_utils.DataLoader(train_data, batch_size=batch_size, num_workers=workers, pin_memory=True,
sampler=train_sampler, collate_fn=_fast_collate, drop_last=True)
log.log("Train loader succesfully created!", LOGTAG, log.Level.DEBUG)
validation_loader = data_utils.DataLoader(validation_data, batch_size=batch_size, num_workers=workers,
pin_memory=True, collate_fn=_fast_collate)
log.log("Validation loader succesfully created!", LOGTAG, log.Level.DEBUG)
# Wrap the PyTorch loaders in the custom FastLoader class and feed it the normalization parameters associated
# with the dataset.
return FastLoader(train_loader, device, *NORMALIZATION[DATASETS[dataset]]), \
FastLoader(validation_loader, device, *NORMALIZATION[DATASETS[dataset]])
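def _example_fast_training_loop(device):
    """Usage sketch (not called anywhere): the dataset name, batch size and data
    path below are placeholders, not module defaults."""
    train_loader, validation_loader = get_fast_loaders(
        'CIFAR-10', batch_size=128, test=False, device=device, data_path='./data')
    for inputs, targets in train_loader:
        pass  # inputs arrive on `device`, already float and normalized
    return validation_loader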
# UTILITY FUNCTIONS
# The utility functions below are not meant to be used outside this module.
def _setup_defaults(dataset, data_path, train_transform, validation_transform, fast):
"""Helper function to setup default path and transforms when creating data loaders.
If any of :param:`data_path`, :param:`train_transform`, or :param:`test_transform` are None, they will be replaced
by default values.
If :param:`fast` is set, transforms compatible with :class:`FastLoader` will be used.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param data_path: path to folder containing dataset.
:param train_transform: PyTorch transform to apply to images for training.
:type train_transform: torchvision.transforms.Compose
:param validation_transform: PyTorch transform to apply to images for validation.
:type validation_transform: torchvision.transforms.Compose
:param fast: whether fast loaders are used.
:type fast: bool
:return: path to data, train transform, and test transform.
:rtype: (str, torchvision.transforms.Compose, torchvision.transforms.Compose)
"""
# Setup the path to the dataset.
if data_path is None:
if dataset in ['ImageNette', 'ImageWoof', 'ImageNet']:
# For these datasets, we cannot rely on torchvision for automatic downloading.
# TODO: Implement automatic downloading of ImageNette, Imagewoof, and ImageNet.
log.log("Auto-download of dataset {0} is not currently supported. "
"Specify a path containing the 'train' and 'val' folders of the dataset.".format(dataset),
LOGTAG, log.Level.ERROR)
raise NotImplementedError("Auto-download of dataset {0} is not currently supported, select a path.")
data_path = dataset # Default to putting the dataset in a folder named 'dataset' in the working folder.
# Setup the train and validation/test transforms.
if fast:
# Use the fast default transforms base instead:
transforms_base = fast_default_transforms
else:
transforms_base = default_transforms
# Currently, the same train and validation/test transforms are used.
# If default data augmentation is implemented, it should be on the training set and not on the validation/test one.
if train_transform is None:
train_transform = transforms_base[DATASETS[dataset]]
if validation_transform is None:
validation_transform = transforms_base[DATASETS[dataset]]
return data_path, train_transform, validation_transform
def _get_train_data(dataset, data_path, transform):
"""Helper function to retrieve training data associated with a dataset.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param data_path: path to a folder containing the dataset.
:type data_path: str
:param transform: PyTorch transform to apply to the data.
:type transform: torchvision.transforms.Compose
:return: full training data from dataset with transform applied.
:rtype: torch.utils.data.Dataset
"""
return _get_data(dataset, data_path, transform, False)
def _get_test_data(dataset, data_path, transform):
"""Helper function to retrieve test data associated with a dataset.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param data_path: path to a folder containing the dataset.
:type data_path: str
:param transform: PyTorch transform to apply to the data.
:type transform: torchvision.transforms.Compose
:return: full test data from dataset with transform applied.
:rtype: torch.utils.data.Dataset
"""
return _get_data(dataset, data_path, transform, True)
def _get_data(dataset, data_path, transform, test):
"""Helper function to retrieve training/test data associated with a dataset.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param data_path: path to a folder containing the dataset.
:type data_path: str
:param transform: PyTorch transform to apply to the data.
:type transform: torchvision.transforms.Compose
:param test: if true, return test data instead of training data.
:type test: bool
:return: full training data from dataset with transform applied.
:rtype: torch.utils.data.Dataset
"""
if dataset in ['ImageNet', 'ImageNette', 'ImageWoof']:
data_path = os.path.join(data_path, FOLDER_NAME[dataset])
if dataset == 'MNIST':
data = datasets.MNIST(data_path, train=not test, download=True, transform=transform)
elif dataset == 'FashionMNIST':
data = datasets.FashionMNIST(data_path, train=not test, download=True, transform=transform)
elif dataset == 'CIFAR-10':
data = datasets.CIFAR10(data_path, train=not test, download=True, transform=transform)
elif dataset == 'CIFAR-100':
data = datasets.CIFAR100(data_path, train=not test, download=True, transform=transform)
elif dataset in ['ImageNette', 'ImageWoof', 'ImageNet']:
# These datasets are not available in torchvision, so we find and build them ourselves:
train_directory = os.path.join(data_path, 'val' if test else 'train')
data = datasets.ImageFolder(train_directory, transform)
else:
log.log("Dataset {0} is not available ! Choose from (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, "
"ImageNette, ImageWoof, ImageNet).".format(dataset), LOGTAG, log.Level.ERROR)
raise NotImplementedError("Dataset {0} is not available!".format(dataset))
return data
def _fast_collate(batch):
"""Faster batch collation function.
Adapted from NVIDIA recommendations.
:param batch: batch of PIL images to process.
:type batch: list[PIL.Image]
:return: batch of (input, output).
:rtype: (torch.Tensor, torch.Tensor))
"""
images = [image[0] for image in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
width = images[0].size[0]
height = images[0].size[1]
tensor = torch.zeros((len(images), 3, height, width), dtype=torch.uint8)
for i, image in enumerate(images):
numpy_array = np.asarray(image, dtype=np.uint8)
if numpy_array.ndim < 3:
numpy_array = np.expand_dims(numpy_array, axis=-1)
import cv2
import math
import numpy as np
from utils.pPose_nms import pose_nms
def get_3rd_point(a, b):
"""Return vector c that perpendicular to (a - b)."""
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
"""Rotate the point by `rot_rad` degree."""
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
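# Small check (illustrative values): rotating the unit x-vector by pi/2 gives the
# unit y-vector, and get_3rd_point builds the corner of a right angle at `b`.
assert np.allclose(get_dir([1., 0.], np.pi / 2), [0., 1.])
assert np.allclose(get_3rd_point(np.array([1., 0.]), np.array([0., 0.])), [0., 1.])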
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale])
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
#!/usr/bin/env python
""" Histogram related classes and functionality.
.. codeauthor:: <NAME> <<EMAIL>>, Yale University
"""
import collections
import itertools
import logging
from dataclasses import dataclass, field
from pathlib import Path
from types import TracebackType
from typing import Any, Callable, ContextManager, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, cast
import numpy as np
import numpy.typing as npt
from pachyderm.typing_helpers import Axis, Hist, TFile
# Setup logger
logger = logging.getLogger(__name__)
_T_ContextManager = TypeVar("_T_ContextManager")
T_Extraction_Function = Tuple[
Union[List[float], npt.NDArray[Any]], Union[List[float], npt.NDArray[Any]], Dict[str, Any]
]
class RootOpen(ContextManager[_T_ContextManager]):
"""Very simple helper to open root files."""
def __init__(self, filename: Union[Path, str], mode: str = "read"):
import ROOT
# Validate as a path
self.filename = Path(filename)
self.mode = mode
self.f = ROOT.TFile.Open(str(self.filename), self.mode)
def __enter__(self) -> TFile:
if not self.f or self.f.IsZombie():
raise IOError(f"Failed to open ROOT file '{self.filename}'.")
return self.f
def __exit__(
self,
execption_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""We want to pass on all raised exceptions, but ensure that the file is always closed."""
# The typing information is from here:
# https://github.com/python/mypy/blob/master/docs/source/protocols.rst#context-manager-protocols
# Pass on all of the exceptions, but make sure that the file is closed.
# NOTE: The file isn't always valid because one has to deal with ROOT,
# so we have to explicitly check that it is valid before continuing.
if self.f:
self.f.Close()
# We don't return anything because we always want the exceptions to continue
# to be raised.
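def _example_list_keys(filename: Union[Path, str]) -> None:
    """Usage sketch (not called anywhere; the file name is supplied by the caller):
    RootOpen closes the file even if an exception is raised inside the block."""
    with RootOpen(filename, mode="READ") as f:
        f.ls()  # print the keys stored in the file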
def get_histograms_in_file(filename: Union[Path, str]) -> Dict[str, Any]:
"""Helper function which gets all histograms in a file.
Args:
filename: Filename of the ROOT file containing the list.
Returns:
Contains hists with keys as their names. Lists are recursively added, mirroring
the structure under which the hists were stored.
"""
# Validation
filename = Path(filename)
return get_histograms_in_list(filename=filename)
def get_histograms_in_list(filename: Union[Path, str], list_name: Optional[str] = None) -> Dict[str, Any]:
"""Get histograms from the file and make them available in a dict.
Lists are recursively explored, with all lists converted to dictionaries, such that the return
dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection``
derived objects).
Args:
filename: Filename of the ROOT file containing the list.
list_name: Name of the list to retrieve.
Returns:
Contains hists with keys as their names. Lists are recursively added, mirroring
the structure under which the hists were stored.
Raises:
ValueError: If the list could not be found in the given file.
"""
# Validation
filename = Path(filename)
hists: Dict[str, Any] = {}
with RootOpen(filename=filename, mode="READ") as fIn:
if list_name is not None:
hist_list = fIn.Get(list_name)
else:
hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()]
if not hist_list:
fIn.ls()
# Closing this file appears (but is not entirely confirmed) to be extremely important! Otherwise,
# the memory will leak, leading to ROOT memory issues!
fIn.Close()
raise ValueError(f'Could not find list with name "{list_name}". Possible names are listed above.')
# Retrieve objects in the hist list
for obj in hist_list:
_retrieve_object(hists, obj)
return hists
def _retrieve_object(output_dict: Dict[str, Any], obj: Any) -> None:
"""Function to recursively retrieve histograms from a list in a ROOT file.
``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given
ownership of the retrieved objects.
Args:
output_dict (dict): Dict under which hists should be stored.
obj (ROOT.TObject derived): Object(s) to be stored. If it is a collection,
it will be recursed through.
Returns:
None: Changes in the dict are reflected in the output_dict which was passed.
"""
import ROOT
# Store TH1 or THn
if isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase):
# Ensure that it is not lost after the file is closed
# Only works for TH1
if isinstance(obj, ROOT.TH1):
obj.SetDirectory(0)
# Explicitly note that python owns the object
# From more on memory management with ROOT and python, see:
# https://root.cern.ch/root/html/guides/users-guide/PythonRuby.html#memory-handling
ROOT.SetOwnership(obj, False)
# Store the object
output_dict[obj.GetName()] = obj
# Recurse over lists
if isinstance(obj, ROOT.TCollection):
# Keeping it in order simply makes it easier to follow
output_dict[obj.GetName()] = {}
# Iterate over the objects in the collection and recursively store them
for obj_temp in list(obj):
_retrieve_object(output_dict[obj.GetName()], obj_temp)
if isinstance(obj, ROOT.TDirectory):
# Keeping it in order simply makes it easier to follow
output_dict[obj.GetName()] = {}
# Iterate over the objects in the collection and recursively store them
for obj_temp in obj.GetListOfKeys():
_retrieve_object(output_dict[obj.GetName()], obj_temp.ReadObj())
def _extract_values_from_hepdata_dependent_variable(var: Mapping[str, Any]) -> T_Extraction_Function:
"""Extract values from a HEPdata dependent variable.
As the simplest useful HEPdata extraction function possible, it retrieves y values, symmetric
statical errors. Symmetric systematic errors are stored in the metadata.
Args:
var: HEPdata dependent variable.
Returns:
y values, errors squared, metadata containing the systematic errors.
"""
values = var["values"]
hist_values = [val["value"] for val in values]
# For now, only support symmetric errors.
hist_stat_errors = []
hist_sys_errors = []
for val in values:
for error in val["errors"]:
if error["label"] == "stat":
hist_stat_errors.append(error["symerror"])
elif "sys" in error["label"]:
hist_sys_errors.append(error["symerror"])
# Validate the collected values.
if len(hist_stat_errors) == 0:
raise ValueError(
f"Could not retrieve statistical errors for dependent var {var}.\n" f" hist_stat_errors: {hist_stat_errors}"
)
if len(hist_values) != len(hist_stat_errors):
raise ValueError(
f"Could not retrieve the same number of values and statistical errors for dependent var {var}.\n"
f" hist_values: {hist_values}\n"
f" hist_stat_errors: {hist_stat_errors}"
)
if len(hist_sys_errors) != 0 and len(hist_sys_errors) != len(hist_stat_errors):
raise ValueError(
f"Could not extract the same number of statistical and systematic errors for dependent var {var}.\n"
f" hist_stat_errors: {hist_stat_errors}\n"
f" hist_sys_errors: {hist_sys_errors}"
)
# Create the histogram
metadata: Dict[str, Any] = {"sys_error": np.array(hist_sys_errors)}
return hist_values, hist_stat_errors, metadata
# Typing helpers
_T = TypeVar("_T", bound="Histogram1D")
@dataclass
class Histogram1D:
"""Contains histogram data.
Note:
Underflow and overflow bins are excluded!
When converting from a TH1 (either from ROOT or uproot), additional statistical information will be extracted
from the hist to enable the calculation of additional properties. The information available is:
- Total sum of weights (equal to np.sum(self.y), which we store)
- Total sum of weights squared (equal to np.sum(self.errors_squared), which we store)
- Total sum of weights * x
- Total sum of weights * x * x
Each is a single float value. Since the later two values are unique, they are stored in the metadata.
Args:
bin_edges (np.ndarray): The histogram bin edges.
y (np.ndarray): The histogram bin values.
errors_squared (np.ndarray): The bin sum weight squared errors.
Attributes:
x (np.ndarray): The bin centers.
y (np.ndarray): The bin values.
bin_edges (np.ndarray): The bin edges.
errors (np.ndarray): The bin errors.
errors_squared (np.ndarray): The bin sum weight squared errors.
metadata (dict): Any additional metadata that should be stored with the histogram. Keys are expected to be
strings, while the values can be anything. For example, could contain systematic errors, etc.
"""
bin_edges: npt.NDArray[Any]
y: npt.NDArray[Any]
errors_squared: npt.NDArray[Any]
metadata: Dict[str, Any] = field(default_factory=dict)
def __post_init__(self) -> None:
"""Perform validation on the inputs."""
# Define this array for convenience in accessing the members.
arrays = {k: v for k, v in vars(self).items() if not k.startswith("_") and k != "metadata"}
# Ensure that they're numpy arrays.
for name, arr in arrays.items():
try:
setattr(self, name, np.array(arr))
except TypeError as e:
raise ValueError(
f"Arrays must be numpy arrays, but could not convert object {name} of"
f" type {type(arr)} to numpy array."
) from e
# Ensure that they're the appropriate length
if not (len(self.bin_edges) - 1 == len(self.y) == len(self.errors_squared)):
logger.debug("mis matched")
raise ValueError(
f"Length of input arrays doesn't match! Bin edges should be one longer than"
f" y and errors_squared. Lengths: bin_edges: {len(self.bin_edges)},"
f" y: {len(self.y)}, errors_squared: {len(self.errors_squared)}"
)
# Ensure they don't point to one another (which can cause issues when performing
# operations in place).
for (a_name, a), (b_name, b) in itertools.combinations(arrays.items(), 2):
if np.may_share_memory(a, b): # type: ignore
logger.warning(f"Object '{b_name}' shares memory with object '{a_name}'. Copying object '{b_name}'!")
setattr(self, b_name, b.copy())
# Create stats based on the stored data.
# Only recalculate if they haven't already been passed in via the metadata.
calculate_stats = False
for key in _stats_keys:
if key not in self.metadata:
calculate_stats = True
break
if calculate_stats:
self._recalculate_stats()
@property
def errors(self) -> npt.NDArray[Any]:
res: npt.NDArray[Any] = np.sqrt(self.errors_squared)
return res
@property
def bin_widths(self) -> npt.NDArray[Any]:
"""Bin widths calculated from the bin edges.
Returns:
Array of the bin widths.
"""
res: npt.NDArray[Any] = self.bin_edges[1:] - self.bin_edges[:-1]
return res
@property
def x(self) -> npt.NDArray[Any]:
"""The histogram bin centers (``x``).
This property caches the x value so we don't have to calculate it every time.
Args:
None
Returns:
Array of center of bins.
"""
try:
return self._x
except AttributeError:
half_bin_widths = self.bin_widths / 2
x = self.bin_edges[:-1] + half_bin_widths
self._x: npt.NDArray[Any] = x
return self._x
@property
def mean(self) -> float:
"""Mean of values filled into the histogram.
Calculated in the same way as ROOT and physt.
Args:
None.
Returns:
Mean of the histogram.
"""
return binned_mean(self.metadata)
@property
def std_dev(self) -> float:
"""Standard deviation of the values filled into the histogram.
Calculated in the same way as ROOT and physt.
Args:
None.
Returns:
Standard deviation of the histogram.
"""
return binned_standard_deviation(self.metadata)
@property
def variance(self) -> float:
"""Variance of the values filled into the histogram.
Calculated in the same way as ROOT and physt.
Args:
None.
Returns:
Variance of the histogram.
"""
return binned_variance(self.metadata)
@property
def n_entries(self) -> float:
"""The number of entries in the hist.
Note:
This value is dependent on the weight. We don't have a weight independent measure like a ROOT hist,
so this value won't agree with the number of entries from a weighted ROOT hist.
"""
return cast(float, np.sum(self.y))
def find_bin(self, value: float) -> int:
"""Find the bin corresponding to the specified value.
For further information, see ``find_bin(...)`` in this module.
Note:
Bins are 0-indexed here, while in ROOT they are 1-indexed.
Args:
            value: Value for which we want the corresponding bin.
Returns:
Bin corresponding to the value.
"""
return find_bin(self.bin_edges, value)
def copy(self: _T) -> _T:
"""Copies the object.
In principle, this should be the same as ``copy.deepcopy(...)``, at least when this was written in
Feb 2019. But ``deepcopy(...)`` often seems to have very bad performance (and perhaps does additional
implicit copying), so we copy these numpy arrays by hand.
"""
# We want to copy bin_edges, y, and errors_squared, but not anything else. Namely, we skip _x here.
# In principle, it wouldn't really be a problem to copy, but there may be other "_" fields that we
# want to skip later, so we do the right thing now.
kwargs: Dict[str, Any] = {
k: np.array(v, copy=True) for k, v in vars(self).items() if not k.startswith("_") and k != "metadata"
}
# We also want to make an explicit copy of the metadata
kwargs["metadata"] = self.metadata.copy()
return type(self)(**kwargs)
def counts_in_interval(
self,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
min_bin: Optional[int] = None,
max_bin: Optional[int] = None,
) -> Tuple[float, float]:
"""Count the number of counts within bins in an interval.
Note:
The integration limits could be described as inclusive. This matches the ROOT convention.
See ``histogram1D._integral(...)`` for further details on how these limits are determined.
Note:
The arguments can be mixed (ie. a min bin and a max value), so be careful!
Args:
min_value: Minimum value for the integral (we will find the bin which contains this value).
max_value: Maximum value for the integral (we will find the bin which contains this value).
min_bin: Minimum bin for the integral.
max_bin: Maximum bin for the integral.
Returns:
(value, error): Integral value, error
"""
return self._integral(
min_value=min_value,
max_value=max_value,
min_bin=min_bin,
max_bin=max_bin,
multiply_by_bin_width=False,
)
def integral(
self,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
min_bin: Optional[int] = None,
max_bin: Optional[int] = None,
) -> Tuple[float, float]:
"""Integrate the histogram over the given range.
Note:
Be very careful here! The equivalent of `TH1::Integral(...)` is `counts_in_interval(..)`.
That's because when we multiply by the bin width, we implicitly should be resetting the stats.
We will still get the right answer in terms of y and errors_squared, but if this result is used
to normalize the hist, the stats will be wrong. We can't just reset them here because the integral
doesn't modify the histogram.
Note:
The integration limits could be described as inclusive. This matches the ROOT convention.
See ``histogram1D._integral(...)`` for further details on how these limits are determined.
Note:
The arguments can be mixed (ie. a min bin and a max value), so be careful!
Args:
min_value: Minimum value for the integral (we will find the bin which contains this value).
max_value: Maximum value for the integral (we will find the bin which contains this value).
min_bin: Minimum bin for the integral.
max_bin: Maximum bin for the integral.
Returns:
(value, error): Integral value, error
"""
return self._integral(
min_value=min_value,
max_value=max_value,
min_bin=min_bin,
max_bin=max_bin,
multiply_by_bin_width=True,
)
def _integral(
self,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
min_bin: Optional[int] = None,
max_bin: Optional[int] = None,
multiply_by_bin_width: bool = False,
) -> Tuple[float, float]:
"""Integrate the histogram over the specified range.
This function provides the underlying implementation of the integral, giving the option to multiply
        by the bin width (in which case, one gets the integral), or not (in which case, one gets the number
of counts in the range).
Note:
Limits of the integral could be described as inclusive. To understand this, consider an example
where the bin edges are ``[0, 1, 2, 5]``, and we request value limits of ``(1.2, 3.6)``. The limits
correspond to bins ``(1, 2)``, and therefore the integral will include the values from both bins 1 and 2.
This matches the ROOT convention, and means that if a user wants the counts in only one bin, they
            should set the min and max bins to the same bin.
Args:
min_value: Minimum value for the integral (we will find the bin which contains this value).
max_value: Maximum value for the integral (we will find the bin which contains this value).
min_bin: Minimum bin for the integral.
max_bin: Maximum bin for the integral.
            multiply_by_bin_width: If true, we will multiply each value by the bin width. This should be done
for integrals, but not for counting values in an interval.
Returns:
(value, error): Integral value, error
"""
# Validate arguments
# Specified both values and bins, which is invalid.
if min_value is not None and min_bin is not None:
raise ValueError("Specified both min value and min bin. Only specify one.")
if max_value is not None and max_bin is not None:
raise ValueError("Specified both max value and max bin. Only specify one.")
# Determine the bins from the values
if min_value is not None:
min_bin = self.find_bin(min_value)
if max_value is not None:
max_bin = self.find_bin(max_value)
# Help out mypy.
assert min_bin is not None
assert max_bin is not None
        # Final validation to ensure that the bins are properly ordered, with the min <= max.
# NOTE: It is valid for the bins to be equal. In that case, we only take values from that single bin.
if min_bin > max_bin:
raise ValueError(
f"Passed min_bin {min_bin} which is greater than the max_bin {max_bin}. The min bin must be smaller."
)
# Provide the opportunity to scale by bin width
widths = np.ones(len(self.y))
if multiply_by_bin_width:
widths = self.bin_widths
# Integrate by summing up all of the bins and the errors.
# Perform the integral.
# NOTE: We set the upper limits to + 1 from the found value because we want to include the bin
# where the upper limit resides. This matches the ROOT convention. Practically, this means
# that if the user wants to integrate over 1 bin, then the min bin and max bin should be the same.
logger.debug(f"Integrating from {min_bin} - {max_bin + 1}")
value = np.sum(self.y[min_bin : max_bin + 1] * widths[min_bin : max_bin + 1])
error_squared = np.sum(self.errors_squared[min_bin : max_bin + 1] * widths[min_bin : max_bin + 1] ** 2)
# We explicitly cast the final result to float to ensure that it doesn't cause any problems
# with saving the final values to YAML.
return float(value), float(np.sqrt(error_squared))
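    # A small illustrative sketch of the inclusive-limit convention described above, using the
    # docstring's bin edges [0, 1, 2, 5] and made-up bin values:
    #
    #   h = Histogram1D(bin_edges=np.array([0., 1., 2., 5.]),
    #                   y=np.array([4., 9., 1.]),
    #                   errors_squared=np.array([4., 9., 1.]))
    #   h.find_bin(1.2)                                     # -> 1 (0-indexed, unlike ROOT)
    #   h.counts_in_interval(min_value=1.2, max_value=3.6)  # sums bins 1 and 2 -> (10.0, ...)
    #   h.integral(min_value=1.2, max_value=3.6)            # same bins, each weighted by its bin width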
def _recalculate_stats(self: _T) -> None:
"""Recalculate the hist stats."""
self.metadata.update(
calculate_binned_stats(bin_edges=self.bin_edges, y=self.y, weights_squared=self.errors_squared)
)
def __add__(self: _T, other: _T) -> _T:
"""Handles ``a = b + c.``"""
new = self.copy()
new += other
return new
def __radd__(self: _T, other: _T) -> _T:
"""For use with sum(...)."""
if other == 0:
return self
else:
return self + other
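    # Note: returning self when other == 0 is what lets the builtin sum() work on a list of
    # histograms with matching binning, e.g. (illustrative): combined = sum([hist_a, hist_b])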
def __iadd__(self: _T, other: _T) -> _T:
"""Handles ``a += b``."""
if not np.allclose(self.bin_edges, other.bin_edges):
raise TypeError(
f"Binning is different for given histograms."
f"len(self): {len(self.bin_edges)}, len(other): {len(other.bin_edges)}."
f"Cannot add!"
)
self.y += other.y
self.errors_squared += other.errors_squared
# Update stats.
for key in _stats_keys:
if key not in self.metadata:
logger.warning(f"Add: Missing stats {key} in existing hist. Can not update stored stats.")
continue
if key not in other.metadata:
logger.warning(f"Add: Missing stats {key} in other hist. Can not update stored stats.")
continue
self.metadata[key] += other.metadata[key]
return self
def __sub__(self: _T, other: _T) -> _T:
"""Handles ``a = b - c``."""
new = self.copy()
new -= other
return new
def __isub__(self: _T, other: _T) -> _T:
"""Handles ``a -= b``."""
if not np.allclose(self.bin_edges, other.bin_edges):
raise TypeError(
f"Binning is different for given histograms."
f"len(self): {len(self.bin_edges)}, len(other): {len(other.bin_edges)}."
f"Cannot subtract!"
)
self.y -= other.y
self.errors_squared += other.errors_squared
# According to ROOT, we need to reset stats because we are subtracting. Otherwise, one
# can get negative variances
self._recalculate_stats()
return self
def _scale_stats(self: _T, scale_factor: float) -> None:
for key in _stats_keys:
if key not in self.metadata:
logger.warning(f"Scaling: Missing stats {key}. Can not update stored stats.")
continue
factor = scale_factor
if key == "_total_sum_w2":
factor = scale_factor * scale_factor
self.metadata[key] = self.metadata[key] * factor
def __mul__(self: _T, other: Union[_T, float]) -> _T:
"""Handles ``a = b * c``."""
new = self.copy()
new *= other
return new
def __imul__(self: _T, other: Union[_T, float]) -> _T:
"""Handles ``a *= b``."""
if np.isscalar(other) or isinstance(other, np.ndarray):
# Help out mypy...
assert isinstance(other, (float, int, np.number, np.ndarray))
# Scale histogram by a scalar
self.y *= other
self.errors_squared *= other ** 2
# Scale stats accordingly. We can only preserve the stats if using a scalar (according to ROOT).
if | np.isscalar(other) | numpy.isscalar |
# ---
# jupyter:
# accelerator: GPU
# colab:
# name: Rigid Object Tutorial
# provenance: []
# jupytext:
# cell_metadata_filter: -all
# formats: nb_python//py:percent,colabs//ipynb
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %%
# !curl -L https://raw.githubusercontent.com/facebookresearch/habitat-sim/master/examples/colab_utils/colab_install.sh | NIGHTLY=true bash -s
# %%
# %cd /content/habitat-sim
## [setup]
import math
import os
import random
import sys
import git
import magnum as mn
import numpy as np
import habitat_sim
from habitat_sim.utils import viz_utils as vut
if "google.colab" in sys.modules:
os.environ["IMAGEIO_FFMPEG_EXE"] = "/usr/bin/ffmpeg"
repo = git.Repo(".", search_parent_directories=True)
dir_path = repo.working_tree_dir
# %cd $dir_path
data_path = os.path.join(dir_path, "data")
output_path = os.path.join(dir_path, "examples/tutorials/rigid_object_tutorial_output/")
def remove_all_objects(sim):
for id_ in sim.get_existing_object_ids():
sim.remove_object(id_)
def place_agent(sim):
# place our agent in the scene
agent_state = habitat_sim.AgentState()
agent_state.position = [-0.15, -0.7, 1.0]
agent_state.rotation = np.quaternion(-0.83147, 0, 0.55557, 0)
agent = sim.initialize_agent(0, agent_state)
return agent.scene_node.transformation_matrix()
def make_configuration():
# simulator configuration
backend_cfg = habitat_sim.SimulatorConfiguration()
backend_cfg.scene_id = os.path.join(
data_path, "scene_datasets/habitat-test-scenes/apartment_1.glb"
)
assert os.path.exists(backend_cfg.scene_id)
backend_cfg.enable_physics = True
# sensor configurations
# Note: all sensors must have the same resolution
# setup 2 rgb sensors for 1st and 3rd person views
camera_resolution = [544, 720]
sensors = {
"rgba_camera_1stperson": {
"sensor_type": habitat_sim.SensorType.COLOR,
"resolution": camera_resolution,
"position": [0.0, 0.6, 0.0],
"orientation": [0.0, 0.0, 0.0],
},
"depth_camera_1stperson": {
"sensor_type": habitat_sim.SensorType.DEPTH,
"resolution": camera_resolution,
"position": [0.0, 0.6, 0.0],
"orientation": [0.0, 0.0, 0.0],
},
"rgba_camera_3rdperson": {
"sensor_type": habitat_sim.SensorType.COLOR,
"resolution": camera_resolution,
"position": [0.0, 1.0, 0.3],
"orientation": [-45, 0.0, 0.0],
},
}
sensor_specs = []
for sensor_uuid, sensor_params in sensors.items():
sensor_spec = habitat_sim.SensorSpec()
sensor_spec.uuid = sensor_uuid
sensor_spec.sensor_type = sensor_params["sensor_type"]
sensor_spec.resolution = sensor_params["resolution"]
sensor_spec.position = sensor_params["position"]
sensor_spec.orientation = sensor_params["orientation"]
sensor_specs.append(sensor_spec)
# agent configuration
agent_cfg = habitat_sim.agent.AgentConfiguration()
agent_cfg.sensor_specifications = sensor_specs
return habitat_sim.Configuration(backend_cfg, [agent_cfg])
def simulate(sim, dt=1.0, get_frames=True):
# simulate dt seconds at 60Hz to the nearest fixed timestep
print("Simulating " + str(dt) + " world seconds.")
observations = []
start_time = sim.get_world_time()
while sim.get_world_time() < start_time + dt:
sim.step_physics(1.0 / 60.0)
if get_frames:
observations.append(sim.get_sensor_observations())
return observations
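# Rough arithmetic sketch (illustrative): with the fixed 1/60 s timestep above, a call such as
# simulate(sim, dt=1.5) advances the physics by roughly 90 steps and, when get_frames is True,
# returns roughly 90 sensor observations for the video helpers used below.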
# [/setup]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--no-show-video", dest="show_video", action="store_false")
parser.add_argument("--no-make-video", dest="make_video", action="store_false")
parser.set_defaults(show_video=True, make_video=True)
args, _ = parser.parse_known_args()
show_video = args.show_video
make_video = args.make_video
if make_video and not os.path.exists(output_path):
os.mkdir(output_path)
# %%
# [initialize]
    # create the simulator configuration AND reset the simulator
cfg = make_configuration()
try: # Got to make initialization idiot proof
sim.close()
except NameError:
pass
sim = habitat_sim.Simulator(cfg)
agent_transform = place_agent(sim)
# get the primitive assets attributes manager
prim_templates_mgr = sim.get_asset_template_manager()
# get the physics object attributes manager
obj_templates_mgr = sim.get_object_template_manager()
# [/initialize]
# %%
# [basics]
# load some object templates from configuration files
sphere_template_id = obj_templates_mgr.load_configs(
str(os.path.join(data_path, "test_assets/objects/sphere"))
)[0]
# add an object to the scene
id_1 = sim.add_object(sphere_template_id)
sim.set_translation(np.array([2.50, 0, 0.2]), id_1)
# simulate
observations = simulate(sim, dt=1.5, get_frames=make_video)
if make_video:
vut.make_video(
observations,
"rgba_camera_1stperson",
"color",
output_path + "sim_basics",
open_vid=show_video,
)
# [/basics]
remove_all_objects(sim)
# %%
# [dynamic_control]
observations = []
obj_templates_mgr.load_configs(str(os.path.join(data_path, "objects")))
# search for an object template by key sub-string
cheezit_template_handle = obj_templates_mgr.get_template_handles(
"data/objects/cheezit"
)[0]
box_positions = [
np.array([2.39, -0.37, 0]),
np.array([2.39, -0.64, 0]),
np.array([2.39, -0.91, 0]),
np.array([2.39, -0.64, -0.22]),
np.array([2.39, -0.64, 0.22]),
]
box_orientation = mn.Quaternion.rotation(
mn.Rad(math.pi / 2.0), np.array([-1.0, 0, 0])
)
    # instantiate and place the boxes
box_ids = []
for b in range(5):
box_ids.append(sim.add_object_by_handle(cheezit_template_handle))
sim.set_translation(box_positions[b], box_ids[b])
sim.set_rotation(box_orientation, box_ids[b])
# get the object's initialization attributes (all boxes initialized with same mass)
object_init_template = sim.get_object_initialization_template(box_ids[0])
# anti-gravity force f=m(-g)
anti_grav_force = -1.0 * sim.get_gravity() * object_init_template.mass
# throw a sphere at the boxes from the agent position
sphere_template = obj_templates_mgr.get_template_by_ID(sphere_template_id)
sphere_template.scale = np.array([0.5, 0.5, 0.5])
obj_templates_mgr.register_template(sphere_template)
sphere_id = sim.add_object(sphere_template_id)
sim.set_translation(
sim.agents[0].get_state().position + np.array([0, 1.0, 0]), sphere_id
)
# get the vector from the sphere to a box
target_direction = sim.get_translation(box_ids[0]) - sim.get_translation(sphere_id)
# apply an initial velocity for one step
sim.set_linear_velocity(target_direction * 5, sphere_id)
sim.set_angular_velocity( | np.array([0, -1.0, 0]) | numpy.array |
import plotly.graph_objects as go
import numpy as np
import pandas as pd
from . import props
class Plot():
def __init__(self, name : str, data : str, out : str):
self.name = name
self.data : str = data
self.out : str = out
        self.x : np.ndarray
        self.y : np.ndarray
self.xuncert : np.ndarray
self.yuncert : np.ndarray
self.avx : np.float64
self.avy : np.float64
self.maxx : np.float64
self.maxy : np.float64
self.trline : np.ndarray
self.rng : np.ndarray
self.corrcoef : np.float64
def createGraph(self):
self.parseData()
fig = go.Figure(data=props.data(self.x, self.y, self.xuncert, self.yuncert), layout=props.layout(self.name, self.trline, self.corrcoef, self.avx, self.avy, self.maxx, self.maxy))
fig.add_trace(props.trendline(self.rng, self.trline))
fig.write_image(self.out)
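    # Minimal usage sketch (file names are hypothetical); the CSV must provide the columns
    # 'x', 'y', 'x±' and 'y±' read by parseData() below:
    #
    #   plot = Plot(name="Trial 1", data="measurements.csv", out="trial1.png")
    #   plot.createGraph()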
def parseData(self):
df = pd.read_csv(self.data)
self.x = np.array(df['x'])
self.y = np.array(df['y'])
self.xuncert = np.array(df['x±'])
self.yuncert = np.array(df['y±'])
self.avx = np.average(self.x)
self.avy = np.average(self.y)
self.maxx = np.max(self.x) + np.max(self.xuncert)
self.maxy = np.max(self.y) + np.max(self.yuncert)
self.trline = np.polyfit(self.x, self.y, 1)
self.rng = np.linspace(np.amin(self.x), | np.amax(self.x) | numpy.amax |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File name: load_data.py
Author: locke
Date created: 2020/3/25 下午7:00
"""
import time
import numpy as np
import sklearn
import time
import torch
from sklearn.neighbors import KDTree
import heapq
def clear_attribute_triples(attribute_triples):
print('\nbefore clear:', len(attribute_triples))
# step 1
attribute_triples_new = set()
attr_num = {}
for (e, a, _) in attribute_triples:
ent_num = 1
if a in attr_num:
ent_num += attr_num[a]
attr_num[a] = ent_num
attr_set = set(attr_num.keys())
attr_set_new = set()
for a in attr_set:
if attr_num[a] >= 10:
attr_set_new.add(a)
for (e, a, v) in attribute_triples:
if a in attr_set_new:
attribute_triples_new.add((e, a, v))
attribute_triples = attribute_triples_new
print('after step 1:', len(attribute_triples))
# step 2
attribute_triples_new = []
literals_number, literals_string = [], []
for (e, a, v) in attribute_triples:
if '"^^' in v:
v = v[:v.index('"^^')]
if v.endswith('"@en'):
v = v[:v.index('"@en')]
if is_number(v):
literals_number.append(v)
else:
literals_string.append(v)
v = v.replace('.', '').replace('(', '').replace(')', '').replace(',', '').replace('"', '')
v = v.replace('_', ' ').replace('-', ' ').replace('/', ' ')
if 'http' in v:
continue
attribute_triples_new.append((e, a, v))
attribute_triples = attribute_triples_new
print('after step 2:', len(attribute_triples))
return attribute_triples, literals_number, literals_string
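# Illustrative sketch of the two cleaning steps on made-up triples: step 1 drops any attribute
# that occurs with fewer than 10 entities; step 2 normalises literal values, so a value such as
# 'Albert_Einstein"@en' first loses its '"@en' suffix and then has underscores replaced by
# spaces, ending up as 'Albert Einstein', while any value containing 'http' is discarded.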
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
class AlignmentData:
def __init__(self, data_dir="data/D_W_15K_V1", rate=0.3, share=False, swap=False, val=0.0, with_r=False,OpenEa = False,rev_relation = True):
t_ = time.time()
        self.rev_relation = rev_relation
self.rate = rate
self.val = val
if(OpenEa):
self.ins2id_dict, self.id2ins_dict, [self.kg1_ins_ids, self.kg2_ins_ids] = self.OpenEa_load_dict(
data_dir + "/ent_links")
self.rel2id_dict, self.id2rel_dict, [self.kg1_rel_ids, self.kg2_rel_ids] = self.OpenEa_load_relation_dict(
data_dir + "/rel_triples_")
self.attr2id_dict, self.id2attr_dict, [self.kg1_attr_ids, self.kg2_attr_ids] = self.OpenEa_load_relation_dict(
data_dir + "/attr_triples_")
self.ins_num = len(self.ins2id_dict)
self.rel_num = len(self.rel2id_dict)
if(self.rev_relation):
self.rel_num = 2
self.num_attr = len(self.attr2id_dict)
self.triple_idx = self.OpenEa_load_triples(data_dir + "/rel_triples_", file_num=2)
self.ill_idx = self.OpenEa_entities_load_triples(data_dir + "/ent_links", file_num=1)
self.ill_train_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/train_links", file_num=1))
#self.ill_val_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/valid_links", file_num=1))
self.ill_val_idx = []
self.ill_test_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/test_links", file_num=1))
self.atrr_idx = self.OpenEa_load_attributes(data_dir + "/attr_triples_", file_num=2)
else:
self.ins2id_dict, self.id2ins_dict, [self.kg1_ins_ids, self.kg2_ins_ids] = self.load_dict(data_dir + "/ent_ids_", file_num=2)
self.rel2id_dict, self.id2rel_dict, [self.kg1_rel_ids, self.kg2_rel_ids] = self.load_dict(data_dir + "/rel_ids_", file_num=2)
self.ins_num = len(self.ins2id_dict)
self.rel_num = len(self.rel2id_dict)
self.triple_idx = self.load_triples(data_dir + "/triples_", file_num=2)
self.ill_idx = self.load_triples(data_dir + "/ill_ent_ids", file_num=1)
np.random.shuffle(self.ill_idx)
self.ill_train_idx, self.ill_val_idx, self.ill_test_idx = np.array(self.ill_idx[:int(len(self.ill_idx) // 1 * rate)], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * rate) : int(len(self.ill_idx) // 1 * (rate+val))], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * (rate+val)):], dtype=np.int32)
if (self.rev_relation):
self.rel_num *= 2
rev_triple_idx = []
for (h, r, t) in self.triple_idx:
rev_triple_idx.append((t, r + self.rel_num // 2, h))
self.triple_idx += rev_triple_idx
self.ill_idx_dic = {}
for x in self.ill_idx:
self.ill_idx_dic[x[0]] = x[1]
self.ill_idx_dic[x[1]] = x[0]
self.ins_G_edges_idx, self.ins_G_values_idx, self.r_ij_idx = self.gen_sparse_graph_from_triples(self.triple_idx, self.ins_num, with_r)
assert (share != swap or (share == False and swap == False))
if share:
self.triple_idx = self.share(self.triple_idx, self.ill_train_idx) # 1 -> 2:base
if(OpenEa):
self.triple_idx = self.share_attr(self.atrr_idx, self.ill_train_idx)
self.kg1_ins_ids = (self.kg1_ins_ids - set(self.ill_train_idx[:, 0])) | set(self.ill_train_idx[:, 1])
self.ill_train_idx = []
if swap:
self.triple_idx = self.swap(self.triple_idx, self.ill_train_idx)
if(OpenEa):
self.triple_idx = self.swap_attr(self.atrr_idx, self.ill_train_idx)
self.labeled_alignment = set()
self.boot_triple_idx = []
self.boot_pair_dix = []
self.init_time = time.time() - t_
def load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple(map(int, i.split("\t"))) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def load_dict(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
what2id, id2what, ids = {}, {}, []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [i.split("\t") for i in data]
what2id = {**what2id, **dict([[i[1], int(i[0])] for i in data])}
id2what = {**id2what, **dict([[int(i[0]), i[1]] for i in data])}
ids.append(set([int(i[0]) for i in data]))
return what2id, id2what, ids
def OpenEa_load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.rel2id_dict[i.split("\t")[1]],self.ins2id_dict[i.split("\t")[2]]]) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def OpenEa_load_attributes(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.attr2id_dict[i.split("\t")[1]],i.split("\t")[2]]) for i in data]
triple += data
triple, _, _ = clear_attribute_triples(triple)
np.random.shuffle(triple)
return triple
def OpenEa_entities_load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.ins2id_dict[i.split("\t")[1]]]) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def OpenEa_load_dict(self, file_name):
ids = []
kg1_ents_uri = []
kg2_ents_uri = []
ins2id_dict = {}
id2ins_dict = {}
with open(file_name, "r", encoding="utf-8") as f:
lines = f.read().strip().split("\n")
kg1_ents_uri = [line.split('\t')[0] for line in lines]
kg2_ents_uri = [line.split('\t')[1] for line in lines]
id = 0
for item in kg1_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
n1 = id
for item in kg2_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
n2 = id - n1
ids.append(set([i for i in range(n1)]))
ids.append(set([i+n1 for i in range(n2)]))
return ins2id_dict, id2ins_dict, ids
def OpenEa_load_relation_dict(self, file_name):
ids = []
kg1_ents_uri = []
kg2_ents_uri = []
ins2id_dict = {}
id2ins_dict = {}
id = 0
pre_n = 0
for i in range(2):
with open(file_name + str(i+1), "r", encoding="utf-8") as f:
lines = f.read().strip().split("\n")
kg_ents_uri = [line.split('\t')[1] for line in lines]
n1 = id
for item in kg_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
ids.append(set([i+n1 for i in range(id-n1)]))
return ins2id_dict, id2ins_dict, ids
def recursive_triple_embedding(self,triples, h_embed,r_embed ,t_embed,num_epoch = 2):
h_embed = sklearn.preprocessing.normalize(h_embed,norm="l2", axis=1)
t_embed = sklearn.preprocessing.normalize(t_embed, norm="l2", axis=1)
r_embed = sklearn.preprocessing.normalize(r_embed, norm="l2", axis=1)
temp_ent_in = h_embed.copy()
temp_ent_out = t_embed.copy()
temp_rel_in = h_embed.copy()
temp_rel_out = t_embed.copy()
adj_rel_in = np.zeros(h_embed.shape)
adj_rel_out = | np.zeros(h_embed.shape) | numpy.zeros |
"""
The package is organized as follows:
There is a main class called :obj:`classo_problem` that contains a lot of information about the problem,
and once the problem is solved, it will also contain the solution.
Here is the global structure of the problem instance:
A :obj:`classo_problem` instance contains a :obj:`Data` instance, a :obj:`Formulation` instance, a :obj:`Model_selection` instance and a :obj:`Solution` instance.
A :obj:`Model_selection` instance contains the instances : :obj:`PATHparameters`, :obj:`CVparameters`, :obj:`StabSelparameters`, :obj:`LAMfixedparameters`.
A :obj:`Solution` instance, once is computed, contains the instances : :obj:`solution_PATH`, :obj:`solution_CV`, :obj:`solution_StabSel`, :obj:`solution_LAMfixed`.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from .misc_functions import (
theoretical_lam,
min_LS,
affichage,
check_size
)
# from .misc_functions import tree_to_matrix
from .compact_func import Classo, pathlasso
from .cross_validation import CV
from .stability_selection import stability, selected_param
import matplotlib.patches as mpatches
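# A minimal usage sketch of the classes below (X and y are placeholder arrays; by default only
# stability selection is computed):
#
#   problem = classo_problem(X, y)
#   problem.formulation.huber = True          # R3 -> R4
#   problem.model_selection.LAMfixed = True   # also solve at a fixed lambda
#   problem.solve()
#   print(problem.solution)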
class classo_problem:
"""Class that contains all the information about the problem.
It also has a representation method so one can print it.
Args:
X (ndarray): Matrix representing the data of the problem.
y (ndarray): Vector representing the output of the problem.
C (str or ndarray, optional ): Matrix of constraints to the problem. If it is 'zero-sum' then the corresponding attribute will be all-one matrix.
Default value : 'zero-sum'
label (list,optional) : list of the labels of each variable. If None, then label are just indices.
Default value : None
Attributes:
data (Data) : object containing the data (matrices) of the problem. Namely : X, y, C and the labels.
formulation (Formulation) : object containing the info about the formulation of the minimization problem we solve.
model_selection (Model_selection) : object containing the parameters we need to do variable selection.
        solution (Solution) : object giving characteristics of the solution of the model selection that is requested.
            Before using the method :func:`solve()` , its components are empty/null.
numerical_method (str) : name of the numerical method that is used, it can be :
'Path-Alg' (path algorithm) , 'P-PDS' (Projected primal-dual splitting method) , 'PF-PDS' (Projection-free primal-dual splitting method) or 'DR' (Douglas-Rachford-type splitting method).
Default value : 'not specified', which means that the function :func:`choose_numerical_method` will choose it accordingly to the formulation.
"""
def __init__(
self, X, y, C = None, Tree = None, label = None
): # zero sum constraint by default, but it can be any matrix
self.data = Data(X, y, C, Tree = Tree, label = label)
self.formulation = Formulation()
self.model_selection = Model_selection()
self.solution = Solution()
self.numerical_method = "not specified"
# This method is the way to solve the model selections contained in the object Model_selection, with the formulation of 'formulation' and the data.
def solve(self):
"""Method that solves every model required in the attributes of the problem instance
and update the attribute :attr:`solution` with the characteristics of the solution."""
data = self.data
self.solution = Solution()
matrices = (data.X, data.C, data.y)
n, d = len(data.X), len(data.X[0])
if self.formulation.classification:
self.formulation.concomitant = False
if type(self.formulation.e) == str:
if self.formulation.e == "n/2":
self.formulation.e = (
n / 2
) # useful to be able to write e = 'n/2' as it is in the default parameters
elif self.formulation.e == "n":
self.formulation.e = n # same
else:
if self.formulation.huber:
self.formulation.e = n
else:
self.formulation.e = n / 2
if self.formulation.w is not None:
if min(self.formulation.w) < 1e-8:
raise ValueError(
"w has to be positive weights, here it has a value smaller than 1e-8"
)
        if len(data.label) > d:
            sup = len(data.label) - d
            print(
                "too many labels, therefore the labels {} have been deleted".format(
                    data.label[:sup]
                )
            )
            data.label = data.label[sup:]
elif len(data.label) < d:
missing = d - len(data.label)
print(
" too few labels, therefore {} labels have been inserted in the end".format(
missing
)
)
data.label = np.array(
list(data.label) + ["missing " + str(i) for i in range(missing)]
)
if self.formulation.intercept:
data.label = np.array(["intercept"] + list(data.label))
yy = data.y - np.mean(data.y)
else:
yy = data.y
if self.formulation.scale_rho:
self.formulation.rho_scaled = self.formulation.rho * np.sqrt(np.mean(yy**2))
else:
self.formulation.rho_scaled = self.formulation.rho
label = data.label
        # Compute the path thanks to the class solution_PATH which contains directly the computation in the initialisation
if self.model_selection.PATH:
self.solution.PATH = solution_PATH(
matrices,
self.model_selection.PATHparameters,
self.formulation,
self.numerical_method,
label,
)
        # Compute the cross validation thanks to the class solution_CV which contains directly the computation in the initialisation
if self.model_selection.CV:
self.solution.CV = solution_CV(
matrices,
self.model_selection.CVparameters,
self.formulation,
self.numerical_method,
label,
)
        # Compute the Stability Selection thanks to the class solution_StabSel which contains directly the computation in the initialisation
if self.model_selection.StabSel:
param = self.model_selection.StabSelparameters
param.theoretical_lam = theoretical_lam(int(n * param.percent_nS), d)
if not param.rescaled_lam:
param.theoretical_lam = param.theoretical_lam * int(
n * param.percent_nS
)
self.solution.StabSel = solution_StabSel(
matrices, param, self.formulation, self.numerical_method, label
)
        # Compute the c-lasso problem at a fixed lam thanks to the class solution_LAMfixed which contains directly the computation in the initialisation
if self.model_selection.LAMfixed:
param = self.model_selection.LAMfixedparameters
param.theoretical_lam = theoretical_lam(n, d)
if not param.rescaled_lam:
param.theoretical_lam = param.theoretical_lam * n
self.solution.LAMfixed = solution_LAMfixed(
matrices, param, self.formulation, self.numerical_method, label
)
def __repr__(self):
print_parameters = ""
if self.model_selection.LAMfixed:
print_parameters += (
"\n \nLAMBDA FIXED PARAMETERS: "
+ self.model_selection.LAMfixedparameters.__repr__()
)
if self.model_selection.PATH:
print_parameters += (
"\n \nPATH PARAMETERS: "
+ self.model_selection.PATHparameters.__repr__()
)
if self.model_selection.CV:
print_parameters += (
"\n \nCROSS VALIDATION PARAMETERS: "
+ self.model_selection.CVparameters.__repr__()
)
if self.model_selection.StabSel:
print_parameters += (
"\n \nSTABILITY SELECTION PARAMETERS: "
+ self.model_selection.StabSelparameters.__repr__()
)
return (
" \n \nFORMULATION: "
+ self.formulation.__repr__()
+ "\n \n"
+ "MODEL SELECTION COMPUTED: "
+ self.model_selection.__repr__()
+ print_parameters
+ "\n"
)
class Data:
"""Class that contains the data of the problem
ie where matrices and labels are stored.
Args:
X (ndarray): Matrix representing the data of the problem.
y (ndarray): Vector representing the output of the problem.
C (str or array, optional ): Matrix of constraints to the problem. If it is 'zero-sum' then the corresponding attribute will be all-one matrix.
        label (list, optional) : list of the labels of each variable. If None, then labels are just the indices.
Default value : None
Tree (skbio.TreeNode, optional) : taxonomic tree, if not None, then the matrices X and C and the labels will be changed.
Attributes:
X (ndarray): Matrix representing the data of the problem.
y (ndarray): Vector representing the output of the problem.
C (str or array, optional ): Matrix of constraints to the problem. If it is 'zero-sum' then the corresponding attribute will be all-one matrix.
        label (list) : list of the labels of each variable. If None, then labels are just the indices.
tree (skbio.TreeNode or None) : taxonomic tree.
"""
def __init__(self, X, y, C, Tree = None, label = None):
X1, y1, C1 = check_size(X, y, C)
if Tree is None:
if label is None:
self.label = np.array([str(i) for i in range(len(X[0]))])
else:
self.label = np.array(label)
self.X, self.y, self.C, self.tree = X1, y1, C1, None
#else:
# A, label2, subtree = tree_to_matrix(Tree, label, with_repr = True)
# self.tree = subtree
# self.X, self.y, self.C, self.label = (
# X1.dot(A),
# y1,
# C1.dot(A),
# np.array(label2),
# )
class Formulation:
"""Class that contains the information about the formulation of the problem
namely, the type of formulation (R1, R2, R3, R4, C1, C2)
and its parameters like rho, the weigths and the presence of an intercept.
The type of formulation is encoded with boolean huber concomitant and classification
with the rule:
False False False = R1
True False False = R2
False True False = R3
True True False = R4
False False True = C1
True False True = C2
It also has a representation method so one can print it.
Attributes:
huber (bool) : True if the formulation of the problem should be robust.
Default value : False
concomitant (bool) : True if the formulation of the problem should be with an M-estimation of sigma.
Default value : True
classification (bool) : True if the formulation of the problem should be classification (if yes, then it will not be concomitant).
Default value : False
rho (float) : Value of rho for R2 and R4 formulations.
Default value : 1.345
scale_rho (bool) : If set to True, it will become
rho * sqrt( mean( y**2 ) ) while solving the problem
so that it lives on the scale of y
            and is also useful so that we avoid problems with non-strict convexity
(i.e. at least one sample is on the quadratic mode of the huber loss function)
as long as rho is higher than one.
Default value : True
rho_scaled (float): Actual rho after solving
Default value : Not defined
rho_classification (float) : value of rho for huberized hinge loss function for classification ie C2
            (it has to be strictly smaller than 1).
Default value : -1.
e (float or string) : value of e in concomitant formulation.
If 'n/2' then it becomes n/2 during the method :func:`solve()`, same for 'n'.
Default value : 'n' if huber formulation ; 'n/2' else
w (numpy ndarray) : array of size d with the weights of the L1 penalization. This has to be positive.
Default value : None (which makes it the 1,...,1 vector)
intercept (bool) : set to true if we should use an intercept.
Default value : False
"""
def __init__(self):
self.huber = False
self.concomitant = True
self.classification = False
self.rho = 1.345
self.scale_rho = True
self.rho_classification = -1.0
self.e = "not specified"
self.w = None
self.intercept = False
def name(self):
if self.classification:
if self.huber:
return "C2"
else:
return "C1"
if self.concomitant:
if self.huber:
return "R4"
else:
return "R3"
if self.huber:
return "R2"
else:
return "R1"
def __repr__(self):
return self.name()
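# Illustrative mapping of the boolean encoding to the formulation name (see the table in the
# class docstring above):
#
#   form = Formulation()
#   form.name()                # 'R3' (concomitant least squares, the default)
#   form.huber = True
#   form.name()                # 'R4' (huber + concomitant)
#   form.concomitant = False
#   form.name()                # 'R2' (huber only)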
class Model_selection:
"""Class that contains information about the model selections to perform.
It contains boolean that states which one will be computed.
It also contains objects that contain parameters of each computation modes.
It also has a representation method so one can print it.
Attributes:
PATH (bool): True if path should be computed.
Default value : False
PATHparameters (PATHparameters): object containing parameters to compute the lasso-path.
CV (bool): True if Cross Validation should be computed.
Default value : False
CVparameters (CVparameters): object containing parameters to compute the cross-validation.
StabSel (boolean): True if Stability Selection should be computed.
Default value : True
StabSelparameters (StabSelparameters): object containing parameters to compute the stability selection.
LAMfixed (boolean): True if solution for a fixed lambda should be computed.
Default value : False
LAMfixedparameters (LAMfixedparameters): object containing parameters to compute the lasso for a fixed lambda.
"""
def __init__(self, method = "not specified"):
# Model selection variables
self.PATH = False
self.PATHparameters = PATHparameters(method = method)
self.CV = False
self.CVparameters = CVparameters(method = method)
self.StabSel = True # Only model selection that is used by default
self.StabSelparameters = StabSelparameters(method = method)
self.LAMfixed = False
self.LAMfixedparameters = LAMfixedparameters(method = method)
def __repr__(self):
string = ""
if self.LAMfixed:
string += "\n Lambda fixed"
if self.PATH:
string += "\n Path"
if self.CV:
string += "\n Cross Validation"
if self.StabSel:
string += "\n Stability selection"
return string
class PATHparameters:
"""Class that contains the parameters to compute the lasso-path.
It also has a representation method so one can print it.
Attributes:
numerical_method (str) : name of the numerical method that is used, it can be :
'Path-Alg' (path algorithm) , 'P-PDS' (Projected primal-dual splitting method),
'PF-PDS' (Projection-free primal-dual splitting method) or 'DR' (Douglas-Rachford-type splitting method).
Default value : 'not specified', which means that the function :func:`choose_numerical_method` will choose it accordingly to the formulation
n_active (int): if it is higher than 0, then the algo stops computing the path when n_active variables are active.
Then the solution does not change from this point.
Default value : 0
lambdas (numpy.ndarray) : list of rescaled lambdas for computing lasso-path.
Default value : None, which means line space between 1 and :attr:`lamin` and :attr:`Nlam` points, with logarithm scale or not depending on :attr:`logscale`.
Nlam (int) : number of points in the lambda-path if :attr:`lambdas` is still None (default).
Default value : 80
lamin (float) : lambda minimum if :attr:`lambdas` is still None (default).
Default value : 1e-3
logscale (bool): when :attr:`lambdas` is set to None (default), this parameters tells if it should be set with log scale or not.
Default value : True
plot_sigma (bool) : if True then the representation method of the solution will also plot the sigma-path if it is computed (formulation R3 or R4).
Default value : True
label (numpy.ndarray of str) : labels on each coefficient.
"""
def __init__(self, method = "not specified"):
self.formulation = "not specified"
self.numerical_method = method
self.n_active = 0
self.Nlam = 80
self.lamin = 1e-3
self.logscale = True
self.lambdas = None
self.plot_sigma = True
self.rescaled_lam = True
def __repr__(self):
if self.lambdas is not None:
self.Nlam = len(self.lambdas)
self.lamin = min(self.lambdas)
typ = " "
else:
if self.logscale:
typ = "with log-scale"
else:
typ = "with linear-scale"
string = "\n numerical_method : " + str(self.numerical_method)
string += "\n lamin = " + str(self.lamin)
string += "\n Nlam = " + str(self.Nlam)
string += "\n " + typ
if self.n_active > 0:
string += "\n maximum active variables = " + str(self.n_active)
return string
class CVparameters:
"""Class that contains the parameters to compute the cross-validation.
It also has a representation method so one can print it.
Attributes:
seed (bool or int, optional) : Seed for random values, for an equal seed, the result will be the same. If set to False/None: pseudo-random seed.
Default value : 0
numerical_method (str) : name of the numerical method that is used, can be :
'Path-Alg' (path algorithm) , 'P-PDS' (Projected primal-dual splitting method),
'PF-PDS' (Projection-free primal-dual splitting method) or 'DR' (Douglas-Rachford-type splitting method).
Default value : 'not specified', which means that the function :func:`choose_numerical_method` will choose it accordingly to the formulation.
lambdas (numpy.ndarray) : list of rescaled lambdas for computing lasso-path.
Default value : None, which means line space between 1 and :attr:`lamin` and :attr:`Nlam` points, with logarithm scale or not depending on :attr:`logscale`.
Nlam (int) : number of points in the lambda-path if :attr:`lambdas` is still None (default).
Default value : 80
lamin (float) : lambda minimum if :attr:`lambdas` is still None (default).
Default value : 1e-3
logscale (bool): when :attr:`lambdas` is set to None (default), this parameters tells if it should be set with log scale or not.
Default value : True
oneSE (bool) : if set to True, the selected lambda is computed with method 'one-standard-error'.
Default value : True
Nsubset (int): number of subset in the cross validation method.
Default value : 5
"""
def __init__(self, method = "not specified"):
self.seed = 0
self.formulation = "not specified"
self.numerical_method = method
self.Nsubset = 5 # Number of subsets used
self.Nlam = 80
self.lamin = 1e-3
self.logscale = True
self.lambdas = None
self.oneSE = True
def __repr__(self):
if self.lambdas is not None:
self.Nlam = len(self.lambdas)
self.lamin = min(self.lambdas)
typ = " "
else:
if self.logscale:
typ = "with log-scale"
else:
typ = "with linear-scale"
string = "\n numerical_method : " + str(self.numerical_method)
string += "\n one-SE method : " + str(self.oneSE)
string += "\n Nsubset = " + str(self.Nsubset)
string += "\n lamin = " + str(self.lamin)
string += "\n Nlam = " + str(self.Nlam)
string += "\n " + typ
return string
class StabSelparameters:
"""Class that contains the parameters to compute the stability selection.
It also has a representation method so one can print it.
Attributes:
seed (bool or int, optional) : Seed for random values, for an equal seed, the result will be the same. If set to False/None: pseudo-random seed.
Default value : 123
numerical_method (str) : name of the numerical method that is used, can be :
'Path-Alg' (path algorithm) , 'P-PDS' (Projected primal-dual splitting method) , 'PF-PDS' (Projection-free primal-dual splitting method) or 'DR' (Douglas-Rachford-type splitting method).
Default value : 'not specified', which means that the function :func:`choose_numerical_method` will choose it accordingly to the formulation.
lam (float or str) : (only used if :obj:`method` = 'lam') lam for which the lasso should be computed.
Default value : 'theoretical' which mean it will be equal to :obj:`theoretical_lam` once it is computed.
        rescaled_lam (bool) : (only used if :obj:`method` = 'lam') False if lam = lambda, True if lam = lambda/lambdamax which is between 0 and 1.
If False and lam = 'theoretical' , then it will take the value n*theoretical_lam.
Default value : True
theoretical_lam (float) : (only used if :obj:`method` = 'lam') Theoretical lam.
Default value : 0.0 (once it is not computed yet, it is computed thanks to the function :func:`theoretical_lam` used in :meth:`classo_problem.solve`).
method (str) : 'first', 'lam' or 'max' depending on the type of stability selection we do.
Default value : 'first'
B (int) : number of subsample considered.
Default value : 50
q (int) : number of selected variable per subsample.
Default value : 10
        percent_nS (float) : size of the subsample relative to the total number of samples.
Default value : 0.5
lamin (float) : lamin when computing the lasso-path for method 'max'.
Default value : 1e-2
        hd (bool) : if set to True, then the 'max' will stop when it reaches n-k active variables.
Default value : False
threshold (float) : threshold for stability selection.
Default value : 0.7
threshold_label (float) : threshold to know when the label should be plot on the graph.
Default value : 0.4
"""
def __init__(self, method = "not specified"):
self.seed = 123
self.formulation = "not specified"
self.numerical_method = method
self.method = "first" # Can be 'first' ; 'max' or 'lam'
self.B = 50
self.q = 10
self.percent_nS = 0.5
self.Nlam = 50 # for path computation
self.lamin = 1e-2 # the lambda where one stop for 'max' method
        self.hd = False  # if set to True, then the 'max' will stop when it reaches n-k active variables
self.lam = "theoretical" # can also be a float, for the 'lam' method
self.rescaled_lam = True
self.threshold = 0.7
self.threshold_label = 0.4
self.theoretical_lam = 0.0
def __repr__(self):
string = "\n numerical_method : " + str(self.numerical_method)
string += "\n method : " + str(self.method)
string += "\n B = " + str(self.B)
string += "\n q = " + str(self.q)
string += "\n percent_nS = " + str(self.percent_nS)
string += "\n threshold = " + str(self.threshold)
if self.method == "lam":
string += "\n lam = " + str(self.lam)
if self.theoretical_lam != 0.0:
string += "\n theoretical_lam = " + str(
round(self.theoretical_lam, 4)
)
else:
string += "\n lamin = " + str(self.lamin)
string += "\n Nlam = " + str(self.Nlam)
return string
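# Illustrative sketch of tweaking the stability-selection parameters on a classo_problem
# instance (here called `problem`, as in the sketch near the top of the module; values are arbitrary):
#
#   problem.model_selection.StabSelparameters.method = 'lam'
#   problem.model_selection.StabSelparameters.B = 100
#   problem.model_selection.StabSelparameters.threshold = 0.6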
class LAMfixedparameters:
"""Class that contains the parameters to compute the lasso for a fixed lambda.
It also has a representation method so one can print it.
Attributes:
numerical_method (str) : name of the numerical method that is used, can be :
'Path-Alg' (path algorithm) , 'P-PDS' (Projected primal-dual splitting method) , 'PF-PDS' (Projection-free primal-dual splitting method) or 'DR' (Douglas-Rachford-type splitting method).
Default value : 'not specified', which means that the function :func:`choose_numerical_method` will choose it accordingly to the formulation
lam (float or str) : lam for which the lasso should be computed.
Default value : 'theoretical' which mean it will be equal to :obj:`theoretical_lam` once it is computed
rescaled_lam (bool) : False if lam = lambda, True if lam = lambda/lambdamax which is between 0 and 1.
            If False and lam = 'theoretical' , then it will take the value n*theoretical_lam.
Default value : True
theoretical_lam (float) : Theoretical lam.
Default value : 0.0 (once it is not computed yet, it is computed thanks to the function :func:`theoretical_lam` used in :meth:`classo_problem.solve`).
        threshold (float) : Threshold such that the selected parameters are those i for which the absolute value of beta[i] is greater than the threshold.
If None, then it will be set to the average of the absolute value of beta.
Default value : None
"""
def __init__(self, method = "not specified"):
self.lam = "theoretical"
self.formulation = "not specified"
self.numerical_method = method
self.rescaled_lam = True
self.theoretical_lam = 0.0
self.threshold = None
def __repr__(self):
string = "\n numerical_method = " + str(self.numerical_method)
string += "\n rescaled lam : " + str(self.rescaled_lam)
if self.threshold is None:
string += "\n threshold : average of the absolute value of beta"
else:
string += "\n threshold = " + str(round(self.threshold, 3))
if type(self.lam) is str:
string += "\n lam : " + self.lam
else:
string += "\n lam = " + str(round(self.lam, 3))
if self.theoretical_lam != 0.0:
string += "\n theoretical_lam = " + str(round(self.theoretical_lam, 4))
return string
class Solution:
"""Class that contains characteristics of the solution of the model_selections that are computed
Before using the method :func:`solve()` , its componant are empty/null.
It also has a representation method so one can print it.
Attributes:
PATH (solution_PATH): Solution components of the model PATH.
CV (solution_CV): Solution components of the model CV.
        StabSel (solution_StabSel): Solution components of the model StabSel.
LAMfixed (solution_LAMfixed): Solution components of the model LAMfixed.
"""
def __init__(self):
self.PATH = "not computed" # this will be filled with an object of the class 'solution_PATH' when the method solve() will be used.
self.CV = "not computed" # will be an object of the class 'solution_PATH'
self.StabSel = (
"not computed" # will be an object of the class 'solution_StabSel'
)
self.LAMfixed = "not computed"
def __repr__(self):
string = ""
if not type(self.LAMfixed) is str:
string += self.LAMfixed.__repr__() + "\n"
if not type(self.PATH) is str:
string += self.PATH.__repr__() + "\n"
if not type(self.CV) is str:
string += self.CV.__repr__() + "\n"
if not type(self.StabSel) is str:
string += self.StabSel.__repr__() + "\n"
return string
# Here, the main function used is pathlasso ; from the file compact_func
class solution_PATH:
"""Class that contains characteristics of the lasso-path computed,
    which also contains a representation method that plots the graph of this lasso-path.
Attributes:
BETAS (numpy.ndarray) : array of size Npath x d with the solution beta for each lambda on each row.
        SIGMAS (numpy.ndarray) : array of size Npath with the solution sigma for each lambda when the formulation of the problem is R3 or R4.
LAMBDAS (numpy.ndarray) : array of size Npath with the lambdas (real lambdas, not divided by lambda_max) for which the solution is computed.
logscale (bool): whether or not the path should be plotted with a logscale.
method (str) : name of the numerical method that has been used. It can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'.
save (bool or str) : if it is a str, then it gives the name of the file where the graphics has been/will be saved (after using print(solution) ).
formulation (Formulation) : object containing the info about the formulation of the minimization problem we solve.
time (float) : running time of this action.
"""
def __init__(self, matrices, param, formulation, numerical_method, label):
t0 = time()
# Formulation choosing
if param.formulation == "not specified":
param.formulation = formulation
if param.numerical_method == "not specified":
param.numerical_method = numerical_method
name_formulation = param.formulation.name()
rho = param.formulation.rho_scaled
rho_classification = param.formulation.rho_classification
e = param.formulation.e
# Algorithmic method choosing
numerical_method = choose_numerical_method(
param.numerical_method, "PATH", param.formulation
)
param.numerical_method = numerical_method
        # Compute the solution; if the formulation is concomitant, it also computes sigma
if param.lambdas is None:
if param.logscale:
param.lambdas = np.array(
[param.lamin ** (i / (param.Nlam - 1)) for i in range(param.Nlam)]
)
else:
param.lambdas = np.linspace(1.0, param.lamin, param.Nlam)
self.logscale = param.logscale
out = pathlasso(
matrices,
lambdas = param.lambdas,
n_active = param.n_active,
typ = name_formulation,
meth = numerical_method,
return_sigm = True,
rho = rho,
e = e,
rho_classification = rho_classification,
w = param.formulation.w,
intercept = param.formulation.intercept,
true_lam = not param.rescaled_lam
)
if formulation.concomitant:
self.BETAS, self.LAMBDAS, self.SIGMAS = out
else:
self.BETAS, self.LAMBDAS = out
self.SIGMAS = "not computed"
self.formulation = formulation
self.plot_sigma = param.plot_sigma
self.method = numerical_method
self.save = False
self.label = label
self.time = time() - t0
def __repr__(self):
string = "\n PATH COMPUTATION : "
d = len(self.BETAS[0])
if (
d > 20
        ):  # this trick is to plot only the biggest values, excluding the intercept
avg_betas = np.mean(abs(np.array(self.BETAS)), axis = 0)
if self.formulation.intercept:
avg_betas[0] = 0 # trick to exclude intercept in the graph
string += "\n There is also an intercept. "
top = np.argpartition(avg_betas, -20)[-20:]
else:
if self.formulation.intercept:
top = np.arange(1, d)
string += "\n There is also an intercept. "
else:
top = | np.arange(d) | numpy.arange |
import unittest
import numpy as np
from nptest import nptest
class LargeArrayTests(unittest.TestCase):
def test_largearray_matmul_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat)
print(z)
def test_largearray_matmul_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_1(self):
width = 2048
height = 2048
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_2(self):
width = 4096
height = 4096
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(1, width), y_range.reshape(height, 1))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_copy_int64_1(self):
length = 268435435 # (Int32.MaxValue) / sizeof(double) - 20;
x = np.arange(0, length, 1, dtype = np.int64);
z = np.sum(x);
print(z)
y = x.copy()
z = np.sum(y)
print(z)
def test_largearray_copy_int64_2(self):
length = 268435434 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
z = np.sum(x, axis=0);
z = np.sum(z)
print(z)
y = x.copy()
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_meshgrid_int64_2(self):
length = 100 * 100
x = np.arange(0,length, 1, dtype = np.int64)
x1, x2 = np.meshgrid(x,x)
print(x1.shape)
print(x2.shape)
z = np.sum(x1)
print(z)
z = np.sum(x2)
print(z)
def test_largearray_checkerboard_1(self):
x = np.zeros((2048,2048),dtype=int)
x[1::2,::2] = 1
x[::2,1::2] = 1
print(np.sum(x))
def test_largearray_byteswap_int64_2(self):
length = 1024 * 1024 * 32 # 32M elements, well under (Int32.MaxValue) / sizeof(double)
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
y = x.byteswap();
z = np.sum(y, axis=0);
z = np.sum(z)
print(z)
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_unique_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
matrix = matrix[1:40:2, 1:-2:1]
uvalues, indexes, inverse, counts = np.unique(matrix, return_counts = True, return_index=True, return_inverse=True);
print(np.sum(uvalues))
print(np.sum(indexes))
print(np.sum(inverse))
print(np.sum(counts))
def test_largearray_where_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
print(np.sum(matrix))
indices = np.where(matrix % 2 == 0);
m1 = matrix[indices]
print(np.sum(m1))
def test_largearray_insert_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.insert(matrix, 0, [999,100,101])
print(np.sum(m1))
def test_largearray_append_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.append(matrix, [999,100,101])
print(np.sum(m1))
def test_largearray_concatenate_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.arange(1, 16000001, dtype=np.int64).reshape((40, -1));
c = np.concatenate((a, b), axis=0)
print(np.sum(c))
#d = np.concatenate((a.T, b), axis=1)
#print(np.sum(d))
e = np.concatenate((a, b), axis=None)
print(np.sum(e))
def test_largearray_min_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amin(a)
print(np.sum(b))
b = np.amin(a, axis=0)
print(np.sum(b))
b = np.amin(a, axis=1)
print(np.sum(b))
def test_largearray_max_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amax(a)
print(np.sum(b))
b = np.amax(a, axis=0)
print(np.sum(b))
b = np.amax(a, axis=1)
print(np.sum(b))
def test_largearray_setdiff1d_INT64(self):
a = np.arange(16000000, dtype=np.int64);
b = np.array([3, 4, 5, 6])
c = np.setdiff1d(a, b)
print(np.sum(a))
print(np.sum(b))
print(np.sum(c))
def test_largearray_copyto_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape(-1, 5);
print(np.sum(a))
b = np.array([1, 2, 3, 4, 5])
| np.copyto(a, b) | numpy.copyto |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: numpy.ndarray
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
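# A minimal usage sketch (not part of the generated module; the helper name is
# hypothetical). It builds a two-operator group by hand -- the identity plus the
# 2_1 screw axis along b used by 'P 1 21 1' below -- and expands one reflection
# with symmetryEquivalentMillerIndices. The returned phase factors are what
# multiply the structure factor of hkl to give that of each equivalent reflection.
def _example_symmetry_expansion():
    identity = N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    two_fold_b = N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
    sg_example = SpaceGroup(4, 'P 1 21 1', [
        (identity, N.array([0, 0, 0]), N.array([1, 1, 1])),
        (two_fold_b, N.array([0, 1, 0]), N.array([1, 2, 1])),
    ])
    hkls, phases = sg_example.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    # hkls rows are (1, 2, 3) and (-1, 2, -3); the second phase factor is
    # exp(-2j*pi*k/2) = exp(-2j*pi) = 1 because k = 2 is even.
    # In normal use the same group would be fetched from the tables defined
    # below via space_groups[4] or space_groups['P 1 21 1'].
    return hkls, phases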
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
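# The body-centred groups above (I m m 2, I b a 2, I m a 2) list each point
# operation twice: once with its basic translation and once with the centring
# translation (1/2, 1/2, 1/2) added; e.g. the identity rotation appears both
# with trans_num/trans_den = (0,0,0)/(1,1,1) and with (1,1,1)/(2,2,2).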
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
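# In the face-centred group above (F m m m) the eight point operations are
# repeated for each of the four centring translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0), giving the 32 entries listed; the F d d d :2
# setting that follows combines the same centring pattern with additional
# quarter-cell shifts.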
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
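# Note (descriptive only): symbols carrying a ':2' suffix, such as 'P 4/n :2'
# above, conventionally denote the origin choice 2 setting of the International
# Tables; the bare symbol without the suffix would refer to origin choice 1.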
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
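# Hedged check (documentation only): each group is registered under both its
# International Tables number and its Hermann-Mauguin symbol, so the two keys
# return the very same SpaceGroup instance; see the two assignments for group
# 97 just above.
assert space_groups[97] is space_groups['I 4 2 2']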
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
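# Space group 109: I 41 m d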
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
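# Space group 110: I 41 c d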
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
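# Space group 111: P -4 2 m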
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
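# Space group 112: P -4 2 c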
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
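# Space group 113: P -4 21 m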
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
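# Space group 114: P -4 21 c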
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
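# Space group 115: P -4 m 2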
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
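# Space group 116: P -4 c 2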
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
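# Space group 117: P -4 b 2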
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
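# Space group 118: P -4 n 2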
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
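# Space group 119: I -4 m 2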
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
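# Space group 120: I -4 c 2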
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
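# Space group 121: I -4 2 m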
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
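# Space group 122: I -4 2 d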
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
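# Space group 123: P 4/m m m (centrosymmetric: the operation list includes the
# inversion -x,-y,-z, i.e. the rotation matrix diag(-1,-1,-1))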
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
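# Space group 124: P 4/m c c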
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
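# Space group 125: P 4/n b m :2
# (the ':2' suffix presumably marks the origin-choice-2 setting of the group)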
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
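# Space group 126: P 4/n n c :2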
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
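# Space group 127: P 4/m b m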
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
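# Space group 128: P 4/m n c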
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
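# Space group 129: P 4/n m m :2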
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
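# Space group 130: P 4/n c c :2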
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
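# Space group 131: P 42/m m c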
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
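# Space group 132: P 42/m c m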
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
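# Trigonal space groups (143-167) start here; the entries above belong to the
# tetragonal system.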
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
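# The hexagonal screw-axis groups that follow (P 61, P 65, P 62, P 64, P 63)
# combine the same six rotations as P 6 with fractional screw translations
# along c (multiples of 1/6).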
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
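# P 6/m: the six rotations of P 6 followed by their inversion-related
# counterparts, all with zero translation.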
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
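# The 622 groups that follow add six two-fold axes perpendicular to c (the
# operations with -1 in the z,z entry) to the six-fold (screw) rotations about c.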
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = | N.array([1,2,2]) | numpy.array |
'''
Reminder CodingRules:
Line breaks at column 120
Module names and class names in CamelCase
Variable, method and function names with under_scores
Please do not mix CamelCase and underscores
'''
'''
I have made the decision not to include anything from the tests
or from the original code itself.
This module should be able to work on its own, but it contains basically no explanation in the code itself;
for that, look at the simpleFlowsTest.
'''
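# Typical entry points (assumed usage; the functions below are called directly, e.g. from a script
# or an interactive session):
#   shear_wave_decay()           # single decay run plus an exponential fit of the amplitude
#   rapid_call()                 # eight runs over combinations of amplitude, relaxation and k_y
#   analyse_different_values()   # FFT analysis of the decayed velocity field for several k_y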
# imports
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
# initial variables and sizes
steps = 10000
size_x = 300
size_y = 300
k_y = 2*np.pi/size_x  # base wave number of the periodic domain (kept as k_y for consistency with the fits below)
amplitude_global = 0.1
periode = 1
relaxation_global = 0.2
velocity_set = np.array([[0, 1, 0, -1, 0, 1, -1, -1, 1],
[0,0,1,0,-1,1,1,-1,-1]]).T
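# D2Q9 lattice: channel 0 is the rest particle, channels 1-4 point along the axes, channels 5-8 along the diagonals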
# main functions
def stream(grid):
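    # streaming step: np.roll shifts each population along its lattice velocity with periodic wrap-around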
for i in range(1,9):
grid[i] = np.roll(grid[i],velocity_set[i], axis = (0,1))
def equilibrium(rho,ux,uy):
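    # D2Q9 equilibrium: f_i^eq = w_i*rho*(1 + 3 c_i.u + 9/2 (c_i.u)^2 - 3/2 u^2),
    # with the weights w = 4/9, 1/9, 1/36 folded into the prefactors 2/9, 1/18 and 1/36 below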
uxy_3plus = 3 * (ux + uy)
uxy_3miuns = 3 * (ux - uy)
uu = 3 * (ux * ux + uy * uy)
ux_6 = 6 * ux
uy_6 = 6 * uy
uxx_9 = 9 * ux * ux
uyy_9 = 9 * uy * uy
uxy_9 = 9 * ux * uy
return np.array([(2 * rho / 9) * (2 - uu),
(rho / 18) * (2 + ux_6 + uxx_9 - uu),
(rho / 18) * (2 + uy_6 + uyy_9 - uu),
(rho / 18) * (2 - ux_6 + uxx_9 - uu),
(rho / 18) * (2 - uy_6 + uyy_9 - uu),
(rho / 36) * (1 + uxy_3plus + uxy_9 + uu),
(rho / 36) * (1 - uxy_3miuns - uxy_9 + uu),
(rho / 36) * (1 - uxy_3plus + uxy_9 + uu),
(rho / 36) * (1 + uxy_3miuns - uxy_9 + uu)])
def collision(grid,rho,ux,uy):
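    # BGK collision step: relax each population towards the local equilibrium with rate omega = relaxation_global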
grid -= relaxation_global * (grid - equilibrium(rho, ux, uy))
def collision_with_relaxation(grid,rho,ux,uy,relaxxation):
grid -= relaxxation * (grid - equilibrium(rho, ux, uy))
def caluculate_rho_ux_uy(grid):
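    # density is the zeroth moment of the populations; the velocity components are the first moments divided by rho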
rho = np.sum(grid, axis=0) # sums over each one individually
ux = ((grid[1] + grid[5] + grid[8]) - (grid[3] + grid[6] + grid[7])) / rho
uy = ((grid[2] + grid[5] + grid[6]) - (grid[4] + grid[7] + grid[8])) / rho
return rho,ux,uy
# fit stuff
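# Theoretical shear wave decay: amplitude * exp(-nu * k_y^2 * t), with kinematic viscosity nu = (1/3)*(1/omega - 1/2)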
def theo_Exp(x, v):
return amplitude_global * np.exp(-v*k_y*k_y*x)
def theo_exp_with_variables(x,v,ky,amplitud):
return amplitud * np.exp(-v * ky * ky * x)
# main body
def shear_wave_decay():
'''
    Simulates the shear wave decay and then fits the exponential decay to the measured amplitude
Returns
-------
'''
print("Shear Wave Decay")
# shear wave
x_values = k_y * np.arange(0,size_x)
shear_wave = amplitude_global * np.sin(periode * x_values)
    # initialize the grid
rho = np.ones((size_x, size_y))
ux = np.zeros((size_x, size_y))
ux[:, :] = shear_wave
uy = np.zeros((size_x, size_y))
grid = equilibrium(rho, ux, uy)
amplitude_array = []
# loop
for i in range(steps):
# standard procedure
stream(grid)
rho,ux,uy = caluculate_rho_ux_uy(grid)
collision(grid,rho,ux,uy)
###
        # analyze the amplitude
ux_fft = np.fft.fft(ux[int(size_x/2),:])
ampl = 2/size_y* np.abs(ux_fft)
ampl = np.max(ampl)
amplitude_array.append(ampl)
# theoretical solution
x = np.arange(0,steps)
v = 1/3 * (1/relaxation_global - 1/2)
    # exponential decay of the amplitude
u_theo = amplitude_global * np.exp(-v*k_y*k_y*x)
###
param,cv = scipy.optimize.curve_fit(theo_Exp,x,amplitude_array)
v_s = param[0]
#print(v_s)
#print(v)
# visualize
fig, ax = plt.subplots()
textstr = '\n'.join((
r'size = %d x %d' % (size_x,size_y ),
r'omega = %.02f' % (relaxation_global,),
r'amplitude = %.02f' % (amplitude_global,),
r'v_theo = %.02f' % (v,),
r'v_sim = %.02f' % (v_s,)
))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.71, 0.82, textstr, transform=ax.transAxes, fontsize=11,
verticalalignment='top', bbox=props)
plt.plot(amplitude_array, label = "Simulated")
plt.plot(u_theo, color = "red",label = "Theoretically")
plt.title("Shear Wave Decay")
plt.ylabel("Amplitude")
plt.xlabel("# of steps")
plt.legend()
plt.show()
def shear_wave_decay_more(amplitude,relaxation,ky):
'''
    Runs a single shear wave decay with the given amplitude, relaxation and wave number,
    and fits the theoretical exponential decay to the simulated amplitude.
    Parameters
    ----------
    amplitude
    relaxation
    ky
    Returns
    -------
    v_theoretical, v_simualated, amplitude_theo, amplitude_array
'''
# return Params
v_theoretical = 0
v_simualated = 0
amplitude_array = []
x_values = ky * np.arange(0, size_x)
shear_wave = amplitude * np.sin(periode * x_values)
    # initialize the grid
rho = np.ones((size_x, size_y))
ux = np.zeros((size_x, size_y))
ux[:, :] = shear_wave
uy = np.zeros((size_x, size_y))
grid = equilibrium(rho, ux, uy)
# loop
for i in range(steps):
# standard procedure
stream(grid)
rho, ux, uy = caluculate_rho_ux_uy(grid)
collision_with_relaxation(grid, rho, ux, uy,relaxation)
###
        # analyze the amplitude
ux_fft = np.fft.fft(ux[int(size_x / 2), :])
ampl = 2 / size_y * np.abs(ux_fft)
ampl = np.max(ampl)
amplitude_array.append(ampl)
# v_theoretical
x = np.arange(0, steps)
v_theoretical = 1 / 3 * (1 / relaxation - 1 / 2)
    # exponential decay of the amplitude
amplitude_theo = amplitude * np.exp(-v_theoretical * ky * ky * x)
# v_simulated
# lambda wrapper for ky and amplitude
param, cv = scipy.optimize.curve_fit(lambda x,v : theo_exp_with_variables(x,v,ky,amplitude), x, amplitude_array)
v_simualated = param[0]
return v_theoretical, v_simualated,amplitude_theo, amplitude_array
def rapid_call():
print("Mass caller, Generate six")
# put v theo and v sim in the labels
# original amplitude
v_theoretical_array = []
v_siumlated_array = []
amplitude_theo_array = []
ampitude_array_array = []
runs = 8
#### Setup
    # call patterns
amplitud = np.array([0.1,0.1,0.1,0.1,0.3,0.3,0.3,0.3])
relaxxation = np.array([0.2,0.2,1.5,1.5,0.2,0.2,1.5,1.5])
nr = np.array([1, 2, 1, 2, 1, 2, 1, 2])
ky = nr * k_y
# running
for i in range(runs):
# fkt
v_theoretical, v_simualated, amplitude_theo, amplitude_array = shear_wave_decay_more(amplitud[i],relaxxation[i] , ky[i])
# append
v_theoretical_array.append(v_theoretical)
v_siumlated_array.append(v_simualated)
amplitude_theo_array.append(amplitude_theo)
ampitude_array_array.append(amplitude_array)
# plotting
x = 0
y = 0
fig_size = (10*2.5,8*2.5)
axs = plt.figure(figsize = fig_size).subplots(4,2)
for i in range(runs):
# plotting
axs[y, x].plot(amplitude_theo_array[i],label = "Theoretically")
axs[y, x].plot(ampitude_array_array[i],label = "Simulated")
axs[y,x].legend()
title_string = ''.join((r'v_theo = %.02f, v_sim = %.02f' % (v_theoretical_array[i],v_siumlated_array[i])))
x_lable_string = ''.join((r'Relaxation %.02f, %d * k_y, Amplitude = %.02f' % (relaxxation[i],nr[i],amplitud[i])))
axs[y,x].set_title(title_string)
axs[y,x].set_xlabel(x_lable_string)
# counting
x +=1
if x == 2:
x = 0
if (i+1) % 2 == 0 and i != 0:
y +=1
plt.show()
def shear_wave_decay_fft_analyise(amplitude,relaxation,ky_factor):
print("Fourier Analysis of the shear wave decay")
# stuff for the basic simulation
ky = k_y * ky_factor
x_values = ky * np.arange(0, size_x)
shear_wave = amplitude * np.sin(periode * x_values)
# initizlize the gird
rho = np.ones((size_x, size_y))
ux = np.zeros((size_x, size_y))
ux[:, :] = shear_wave
uy = np.zeros((size_x, size_y))
grid = equilibrium(rho, ux, uy)
# loop
for i in range(steps):
# standard procedure
stream(grid)
rho, ux, uy = caluculate_rho_ux_uy(grid)
collision_with_relaxation(grid, rho, ux, uy, relaxation)
    # FFT analysis
    # only meaningful after the loop, since the amplitude is not recorded during the run
freq_y, transform_y = do_fft_analysis(uy[int(size_x / 2), :])
freq_x, transform_x = do_fft_analysis(ux[int(size_x / 2), :])
plt.plot(freq_x,transform_x, label = "ux")
plt.plot(freq_y, transform_y, label = "uy")
plt.legend()
plt.show()
def shear_wave_different_times(amplitude,relaxation,ky_factor):
print("Shear Wave Decay Fourier Analysis at different timesteps")
# stuff for the basic simulation
runs = 10000
ky = ky_factor* k_y
x_values = ky * np.arange(0, size_x)
shear_wave = amplitude * np.sin(periode * x_values)
    # initialize the grid
rho = np.ones((size_x, size_y))
ux = np.zeros((size_x, size_y))
ux[:, :] = shear_wave
uy = np.zeros((size_x, size_y))
grid = equilibrium(rho, ux, uy)
#
plt.figure(figsize=(12,9), dpi = 100)
# loop
for i in range(runs +1):
# standard procedure
stream(grid)
rho, ux, uy = caluculate_rho_ux_uy(grid)
collision_with_relaxation(grid, rho, ux, uy, relaxation)
# every 1000 runs do an analysis
# plot it into one diagram only analyse ux
# label_string = ""
fig_size = (10 * 2.5, 8 * 2.5)
axs = plt.figure(figsize=fig_size).subplots(2, 2)
# calcs
freq_x, fourier_x = do_fft_analysis(ux[int(size_x / 2), :])
freq_y, fourier_y = do_fft_analysis(uy[int(size_x / 2), :])
fourier_x = fourier_x/np.linalg.norm(fourier_x)
fourier_y = fourier_y / np.linalg.norm(fourier_y)
##
axs[0, 0].plot(freq_x,fourier_x)
axs[0, 0].set_xlabel("Wave number")
axs[0, 0].set_ylabel("Amplitude vx(ky)")
##
axs[1, 0].plot(freq_y,fourier_y)
axs[1, 0].set_xlabel("Wave number")
axs[1, 0].set_ylabel("Amplitude vy(ky)")
###
freq_x, fourier_x = do_fft_analysis(ux[: ,int(size_x / 2)])
freq_y, fourier_y = do_fft_analysis(uy[: ,int(size_x / 2)])
fourier_x = fourier_x / np.linalg.norm(fourier_x)
fourier_y = fourier_y / np.linalg.norm(fourier_y)
####
axs[0, 1].plot(freq_x, fourier_x)
axs[0, 1].set_xlabel("Wave number")
axs[0, 1].set_ylabel("Amplitude vx(kx)")
##
axs[1, 1].plot(freq_y, fourier_y)
axs[1, 1].set_xlabel("Wave number")
axs[1, 1].set_ylabel("Amplitude vy(kx)")
title_string = "Amplitude {}".format(amplitude) \
+ " ,relaxation {}".format(relaxation) + \
" , {}*ky".format(ky_factor) \
+ ", size {}".format(size_x)
plt.suptitle(title_string)
plt.show()
def shear_wave_decay_return(amplitude,relaxation,ky_factor):
# stuff for the basic simulation
ky = k_y * ky_factor
x_values = ky * np.arange(0, size_x)
shear_wave = amplitude * np.sin(periode * x_values)
    # initialize the grid
rho = np.ones((size_x, size_y))
ux = np.zeros((size_x, size_y))
ux[:, :] = shear_wave
uy = np.zeros((size_x, size_y))
grid = equilibrium(rho, ux, uy)
# loop
for i in range(steps):
# standard procedure
stream(grid)
rho, ux, uy = caluculate_rho_ux_uy(grid)
collision_with_relaxation(grid, rho, ux, uy, relaxation)
return ux, uy
def analyse_different_values():
print("Analyse diffrent k_ys")
# call patterns
num_of_patterns = 8
amplitude = 0.1
relaxation = 0.2
amplitude_call_pattern = np.ones(num_of_patterns) * amplitude
relaxation_call_pattern = np.ones(num_of_patterns) * relaxation
ky_factor_call_pattern = (np.arange(num_of_patterns)+1) * 2
# save bins
ux_bin = []
uy_bin = []
freq_x_bin = []
fourier_x_bin = []
freq_y_bin = []
fourier_y_bin = []
# run all patterns
for i in range(num_of_patterns):
# call function
ux, uy = shear_wave_decay_return(amplitude_call_pattern[i],relaxation_call_pattern[i],ky_factor_call_pattern[i])
# only save the value in the middle the rest can be discarded
ux_bin.append(ux[int(size_x / 2), :])
uy_bin.append(uy[int(size_x / 2), :])
# do a fft analysis
for i in range(num_of_patterns):
freq_x, fourier_x = do_fft_analysis(ux_bin[i])
freq_y, fourier_y = do_fft_analysis(uy_bin[i])
# append
freq_x_bin.append(freq_x)
freq_y_bin.append(freq_y)
fourier_x_bin.append(fourier_x)
fourier_y_bin.append(fourier_y)
# plotting
x = 0
y = 0
fig_size = (10 * 2, 8 * 2)
axs = plt.figure(figsize=fig_size).subplots(4, 2)
for i in range(num_of_patterns):
# actual plotting
axs[y, x].plot(freq_x_bin[i],fourier_x_bin[i],label = "ux")
axs[y, x].plot(freq_y_bin[i],fourier_y_bin[i],label = "uy")
title_string = "Amplitude {}".format(amplitude_call_pattern[i]) \
+ " ,relaxation {}".format(relaxation_call_pattern[i]) + \
" , {}*ky".format(ky_factor_call_pattern[i]) \
+", size {}".format(size_x)
axs[y,x].set_title(title_string)
axs[y, x].set_xlabel("Frequency")
axs[y, x].set_ylabel("Amplitude")
axs[y,x].legend()
# counting
x += 1
if x == 2:
x = 0
if (i + 1) % 2 == 0 and i != 0:
y += 1
# dont forget
plt.show()
def plotter_shear_wave():
sample_freq = size_x
sample_time = 1/sample_freq
amplitude = 0.1
ky = k_y
x_values = ky * np.arange(0, size_x)
shear_wave = amplitude * | np.sin(periode * x_values) | numpy.sin |
from .. import util
from ..probabilities import pulsars, mass
from ..core.data import Observations, Model
import h5py
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_clr
import astropy.visualization as astroviz
__all__ = ['ModelVisualizer', 'CIModelVisualizer', 'ObservationsVisualizer']
class _ClusterVisualizer:
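    '''Base class collecting the plotting machinery shared by the visualizers in this module'''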
_MARKERS = ('o', '^', 'D', '+', 'x', '*', 's', 'p', 'h', 'v', '1', '2')
# Default xaxis limits for all profiles. Set by inits, can be reset by user
rlims = None
# -----------------------------------------------------------------------
# Artist setups
# -----------------------------------------------------------------------
def _setup_artist(self, fig, ax, *, use_name=True):
'''setup a plot (figure and ax) with one single ax'''
if ax is None:
if fig is None:
# no figure or ax provided, make one here
fig, ax = plt.subplots()
else:
# Figure provided, no ax provided. Try to grab it from the fig
                # if that doesn't work, create it
cur_axes = fig.axes
if len(cur_axes) > 1:
raise ValueError(f"figure {fig} already has too many axes")
elif len(cur_axes) == 1:
ax = cur_axes[0]
else:
ax = fig.add_subplot()
else:
if fig is None:
                # ax is provided, but no figure. Grab its figure from it
fig = ax.get_figure()
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
return fig, ax
def _setup_multi_artist(self, fig, shape, *, allow_blank=True,
use_name=True, constrained_layout=True,
subfig_kw=None, **sub_kw):
'''setup a subplot with multiple axes'''
if subfig_kw is None:
subfig_kw = {}
def create_axes(base, shape):
'''create the axes of `shape` on this base (fig)'''
            # make sure shape is a tuple of at least 1d, at most 2d
            if not isinstance(shape, tuple):
                # wrap a bare int, otherwise coerce any other iterable to a tuple
                shape = (shape,) if isinstance(shape, int) else tuple(shape)
            if len(shape) == 1:
                shape = (shape, 1)
elif len(shape) > 2:
mssg = f"Invalid `shape` for subplots {shape}, must be 2D"
raise ValueError(mssg)
# split into dict of nrows, ncols
shape = dict(zip(("nrows", "ncols"), shape))
# if either of them is also a tuple, means we want columns or rows
# of varying sizes, switch to using subfigures
# TODO what are the chances stuff like `sharex` works correctly?
if isinstance(shape['nrows'], tuple):
subfigs = base.subfigures(ncols=shape['ncols'], nrows=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nr = shape['nrows'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of row entries {shape['nrows']} must "
f"match number of columns ({shape['ncols']})")
raise ValueError(mssg)
sf.subplots(ncols=1, nrows=nr, **sub_kw)
elif isinstance(shape['ncols'], tuple):
subfigs = base.subfigures(nrows=shape['nrows'], ncols=1,
squeeze=False, **subfig_kw)
for ind, sf in enumerate(subfigs.flatten()):
try:
nc = shape['ncols'][ind]
except IndexError:
if allow_blank:
continue
mssg = (f"Number of col entries {shape['ncols']} must "
f"match number of rows ({shape['nrows']})")
raise ValueError(mssg)
sf.subplots(nrows=1, ncols=nc, **sub_kw)
# otherwise just make a simple subplots and return that
else:
base.subplots(**shape, **sub_kw)
return base, base.axes
# ------------------------------------------------------------------
# Create figure, if necessary
# ------------------------------------------------------------------
if fig is None:
fig = plt.figure(constrained_layout=constrained_layout)
# ------------------------------------------------------------------
# If no shape is provided, just return the figure, probably empty
# ------------------------------------------------------------------
if shape is None:
axarr = []
# ------------------------------------------------------------------
# Otherwise attempt to first grab this figures axes, or create them
# ------------------------------------------------------------------
else:
# this fig has axes, check that they match shape
if axarr := fig.axes:
# TODO this won't actually work, cause fig.axes is just a list
if axarr.shape != shape:
mssg = (f"figure {fig} already contains axes with "
f"mismatched shape ({axarr.shape} != {shape})")
raise ValueError(mssg)
else:
fig, axarr = create_axes(fig, shape)
# ------------------------------------------------------------------
# If desired, default to titling the figure based on it's "name"
# ------------------------------------------------------------------
if hasattr(self, 'name') and use_name:
fig.suptitle(self.name)
# ------------------------------------------------------------------
# Ensure the axes are always returned in an array
# ------------------------------------------------------------------
return fig, np.atleast_1d(axarr)
# -----------------------------------------------------------------------
# Unit support
# -----------------------------------------------------------------------
def _support_units(method):
import functools
@functools.wraps(method)
def _unit_decorator(self, *args, **kwargs):
# convert based on median distance parameter
eqvs = util.angular_width(self.d)
with astroviz.quantity_support(), u.set_enabled_equivalencies(eqvs):
return method(self, *args, **kwargs)
return _unit_decorator
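    # Methods decorated with `_support_units` run inside astropy's quantity plotting support,
    # with angular-width equivalencies for the median distance `self.d`, so radial profiles can
    # be plotted interchangeably in physical (pc) or angular units.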
# -----------------------------------------------------------------------
# Plotting functionality
# -----------------------------------------------------------------------
def _get_median(self, percs):
'''from an array of data percentiles, return the median array'''
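        # e.g. percs of shape (2N+1, n_r) -> row N, the middle (median) profile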
return percs[percs.shape[0] // 2] if percs.ndim > 1 else percs
def _get_err(self, dataset, key):
'''gather the error variables corresponding to `key` from `dataset`'''
try:
return dataset[f'Δ{key}']
except KeyError:
try:
return (dataset[f'Δ{key},down'], dataset[f'Δ{key},up'])
except KeyError:
return None
def _plot_model(self, ax, data, intervals=None, *,
x_data=None, x_unit='pc', y_unit=None,
CI_kwargs=None, **kwargs):
CI_kwargs = dict() if CI_kwargs is None else CI_kwargs
# ------------------------------------------------------------------
# Evaluate the shape of the data array to determine confidence
# intervals, if applicable
# ------------------------------------------------------------------
if data is None or data.ndim == 0:
return
elif data.ndim == 1:
data = data.reshape((1, data.size))
if not (data.shape[0] % 2):
mssg = 'Invalid `data`, must have odd-numbered zeroth axis shape'
raise ValueError(mssg)
midpoint = data.shape[0] // 2
if intervals is None:
intervals = midpoint
elif intervals > midpoint:
mssg = f'{intervals}σ is outside stored range of {midpoint}σ'
raise ValueError(mssg)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
x_domain = self.r if x_data is None else x_data
if x_unit:
x_domain = x_domain.to(x_unit)
if y_unit:
data = data.to(y_unit)
# ------------------------------------------------------------------
# Plot the median (assumed to be the middle axis of the intervals)
# ------------------------------------------------------------------
median = data[midpoint]
med_plot, = ax.plot(x_domain, median, **kwargs)
# ------------------------------------------------------------------
# Plot confidence intervals successively from the midpoint
# ------------------------------------------------------------------
output = [med_plot]
CI_kwargs.setdefault('color', med_plot.get_color())
alpha = 0.8 / (intervals + 1)
for sigma in range(1, intervals + 1):
CI = ax.fill_between(
x_domain, data[midpoint + sigma], data[midpoint - sigma],
alpha=(1 - alpha), **CI_kwargs
)
output.append(CI)
alpha += alpha
return output
def _plot_data(self, ax, dataset, y_key, *,
x_key='r', x_unit='pc', y_unit=None,
err_transform=None, **kwargs):
# TODO need to handle colours better
defaultcolour = None
# ------------------------------------------------------------------
# Get data and relevant errors for plotting
# ------------------------------------------------------------------
xdata = dataset[x_key]
ydata = dataset[y_key]
xerr = self._get_err(dataset, x_key)
yerr = self._get_err(dataset, y_key)
# ------------------------------------------------------------------
# Convert any units desired
# ------------------------------------------------------------------
if x_unit is not None:
xdata = xdata.to(x_unit)
if y_unit is not None:
ydata = ydata.to(y_unit)
# ------------------------------------------------------------------
# If given, transform errors based on `err_transform` function
# ------------------------------------------------------------------
if err_transform is not None:
yerr = err_transform(yerr)
# ------------------------------------------------------------------
# Setup default plotting details, style, labels
# ------------------------------------------------------------------
kwargs.setdefault('marker', '.')
kwargs.setdefault('linestyle', 'None')
kwargs.setdefault('color', defaultcolour)
# TODO should try to cite, but if that fails just use raw bibcode?
label = dataset.cite()
if 'm' in dataset.mdata:
label += fr' ($m={dataset.mdata["m"]}\ M_\odot$)'
# ------------------------------------------------------------------
# Plot
# ------------------------------------------------------------------
# TODO not sure if I like the mfc=none style,
# mostly due to https://github.com/matplotlib/matplotlib/issues/3400
return ax.errorbar(xdata, ydata, xerr=xerr, yerr=yerr, mfc='none',
label=label, **kwargs)
def _plot_profile(self, ax, ds_pattern, y_key, model_data, *,
residuals=False, err_transform=None,
**kwargs):
'''figure out what needs to be plotted and call model/data plotters
all **kwargs passed to both _plot_model and _plot_data
model_data dimensions *must* be (mass bins, intervals, r axis)
'''
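        # e.g. self._plot_profile(ax, '*number_density*', 'Σ', self.numdens) plots every matching
        # dataset and the corresponding mass-bin slices of the model with confidence intervals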
# TODO we might still want to allow for specific model/data kwargs?
ds_pattern = ds_pattern or ''
strict = kwargs.pop('strict', False)
# Restart marker styles each plotting call
markers = iter(self._MARKERS)
# TODO need to figure out how we handle passed kwargs better
default_clr = kwargs.pop('color', None)
# ------------------------------------------------------------------
# Determine the relevant datasets to the given pattern
# ------------------------------------------------------------------
datasets = self.obs.filter_datasets(ds_pattern)
if strict and ds_pattern and not datasets:
mssg = f"Dataset matching '{ds_pattern}' do not exist in {self.obs}"
# raise DataError
raise KeyError(mssg)
# ------------------------------------------------------------------
# Iterate over the datasets, keeping track of all relevant masses
# and calling `_plot_data`
# ------------------------------------------------------------------
masses = {}
for key, dset in datasets.items():
mrk = next(markers)
# get mass bin of this dataset, for later model plotting
if 'm' in dset.mdata:
m = dset.mdata['m'] * u.Msun
mass_bin = np.where(self.mj == m)[0][0]
else:
mass_bin = self.star_bin
if mass_bin in masses:
clr = masses[mass_bin][0][0].get_color()
else:
clr = default_clr
# plot the data
try:
line = self._plot_data(ax, dset, y_key, marker=mrk, color=clr,
err_transform=err_transform, **kwargs)
except KeyError as err:
if strict:
raise err
else:
# warnings.warn(err.args[0])
continue
masses.setdefault(mass_bin, [])
masses[mass_bin].append(line)
# ------------------------------------------------------------------
# Based on the masses of data plotted, plot the corresponding axes of
# the model data, calling `_plot_model`
# ------------------------------------------------------------------
if model_data is not None:
# ensure that the data is (mass bin, intervals, r domain)
if len(model_data.shape) != 3:
raise ValueError("invalid model data shape")
# No data plotted, use the star_bin
if not masses:
if model_data.shape[0] > 1:
masses = {self.star_bin: None}
else:
masses = {0: None}
res_ax = None
for mbin, errbars in masses.items():
ymodel = model_data[mbin, :, :]
# TODO having model/data be same color is kinda hard to read
# this is why I added mfc=none, but I dont like that either
if errbars is not None:
clr = errbars[0][0].get_color()
else:
clr = default_clr
self._plot_model(ax, ymodel, color=clr, **kwargs)
if residuals:
res_ax = self._add_residuals(ax, ymodel, errbars,
res_ax=res_ax, **kwargs)
if self.rlims is not None:
ax.set_xlim(*self.rlims)
# -----------------------------------------------------------------------
# Plot extras
# -----------------------------------------------------------------------
def _add_residuals(self, ax, ymodel, errorbars, *,
xmodel=None, y_unit=None, res_ax=None, **kwargs):
'''
        errorbars : a list of outputs from calls to plt.errorbar
'''
from mpl_toolkits.axes_grid1 import make_axes_locatable
if not errorbars:
mssg = "Cannot compute residuals, no observables data provided"
raise ValueError(mssg)
# ------------------------------------------------------------------
# Get model data and spline
# ------------------------------------------------------------------
if xmodel is None:
xmodel = self.r
if y_unit is not None:
ymodel = ymodel.to(y_unit)
ymedian = self._get_median(ymodel)
yspline = util.QuantitySpline(xmodel, ymedian)
# ------------------------------------------------------------------
# Setup axes, adding a new smaller axe for the residual underneath,
# if it hasn't already been created (and passed to `res_ax`)
# ------------------------------------------------------------------
if res_ax is None:
divider = make_axes_locatable(ax)
res_ax = divider.append_axes('bottom', size="15%", pad=0, sharex=ax)
res_ax.grid()
res_ax.set_xscale(ax.get_xscale())
# ------------------------------------------------------------------
# Plot the model line, hopefully centred on zero
# ------------------------------------------------------------------
self._plot_model(res_ax, ymodel - ymedian, color='k')
# ------------------------------------------------------------------
# Get data from the plotted errorbars
# ------------------------------------------------------------------
for errbar in errorbars:
# --------------------------------------------------------------
# Get the actual datapoints, and the hopefully correct units
# --------------------------------------------------------------
xdata, ydata = errbar[0].get_data()
ydata = ydata.to(ymedian.unit)
# --------------------------------------------------------------
# Grab relevant formatting (colours and markers)
# --------------------------------------------------------------
clr = errbar[0].get_color()
mrk = errbar[0].get_marker()
# --------------------------------------------------------------
# Parse the errors from the size of the errorbar lines (messy)
# --------------------------------------------------------------
xerr = yerr = None
if errbar.has_xerr:
xerr_lines = errbar[2][0]
yerr_lines = errbar[2][1] if errbar.has_yerr else None
elif errbar.has_yerr:
xerr_lines, yerr_lines = None, errbar[2][0]
else:
xerr_lines = yerr_lines = None
if xerr_lines:
xerr = np.array([(np.diff(seg, axis=0) / 2)[..., -1]
for seg in xerr_lines.get_segments()]).T[0]
xerr <<= xdata.unit
if yerr_lines:
yerr = np.array([(np.diff(seg, axis=0) / 2)[..., -1]
for seg in yerr_lines.get_segments()]).T[0]
yerr <<= ydata.unit
# --------------------------------------------------------------
# Compute the residuals and plot them
# --------------------------------------------------------------
res = yspline(xdata) - ydata
res_ax.errorbar(xdata, res, xerr=xerr, yerr=yerr,
color=clr, marker=mrk, linestyle='none')
return res_ax
def _add_hyperparam(self, ax, ymodel, xdata, ydata, yerr):
# TODO this is still a complete mess
yspline = util.QuantitySpline(self.r, ymodel)
if hasattr(ax, 'aeff_text'):
aeff_str = ax.aeff_text.get_text()
aeff = float(aeff_str[aeff_str.rfind('$') + 1:])
else:
# TODO figure out best place to place this at
ax.aeff_text = ax.text(0.1, 0.3, '')
aeff = 0.
aeff += util.hyperparam_effective(ydata, yspline(xdata), yerr)
ax.aeff_text.set_text(fr'$\alpha_{{eff}}=${aeff:.4e}')
# -----------------------------------------------------------------------
# Observables plotting
# -----------------------------------------------------------------------
@_support_units
def plot_LOS(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='km/s'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Line-of-Sight Velocity Dispersion')
ax.set_xscale("log")
if show_obs:
pattern, var = '*velocity_dispersion*', 'σ'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.LOS,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_tot(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Total Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_tot'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.pm_tot,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_ratio(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Proper Motion Anisotropy")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_ratio'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
self._plot_profile(ax, pattern, var, self.pm_ratio,
strict=strict, residuals=residuals,
x_unit=x_unit)
ax.legend()
return fig
@_support_units
def plot_pm_T(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Tangential Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_T'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
# pm_T = self.pm_T.to('mas/yr')
self._plot_profile(ax, pattern, var, self.pm_T,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_pm_R(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc', y_unit='mas/yr'):
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Radial Proper Motion")
ax.set_xscale("log")
if show_obs:
pattern, var = '*proper_motion*', 'PM_R'
strict = show_obs == 'strict'
else:
pattern = var = None
strict = False
# pm_R = self.pm_R.to('mas/yr')
self._plot_profile(ax, pattern, var, self.pm_R,
strict=strict, residuals=residuals,
x_unit=x_unit, y_unit=y_unit)
ax.legend()
return fig
@_support_units
def plot_number_density(self, fig=None, ax=None,
show_obs=True, residuals=False, *,
x_unit='pc'):
def quad_nuisance(err):
return np.sqrt(err**2 + (self.s2 << err.unit**2))
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Number Density')
ax.loglog()
if show_obs:
pattern, var = '*number_density*', 'Σ'
strict = show_obs == 'strict'
kwargs = {'err_transform': quad_nuisance}
else:
pattern = var = None
strict = False
kwargs = {}
self._plot_profile(ax, pattern, var, self.numdens,
strict=strict, residuals=residuals,
x_unit=x_unit, **kwargs)
# bit arbitrary, but probably fine for the most part
ax.set_ylim(bottom=1e-4)
ax.legend()
return fig
@_support_units
def plot_pulsar(self, fig=None, ax=None, show_obs=True):
# TODO this is out of date with the new pulsar probability code
# TODO I dont even think this is what we should use anymore, but the
# new convolved distributions peak
fig, ax = self._setup_artist(fig, ax)
ax.set_title('Pulsar LOS Acceleration')
ax.set_xlabel('R')
ax.set_ylabel(r'$a_{los}$')
maz = u.Quantity(np.empty(self.model.nstep - 1), '1/s')
for i in range(self.model.nstep - 1):
a_domain, Paz = pulsars.cluster_component(self.model, self.model.r[i], -1)
maz[i] = a_domain[Paz.argmax()] << maz.unit
maz = (self.obs['pulsar/P'] * maz).decompose()
if show_obs:
try:
obs_pulsar = self.obs['pulsar']
ax.errorbar(obs_pulsar['r'],
self.obs['pulsar/Pdot'],
yerr=self.obs['pulsar/ΔPdot'],
fmt='k.')
except KeyError as err:
if show_obs != 'attempt':
raise err
model_r = self.model.r.to(u.arcmin, util.angular_width(self.model.d))
upper_az, = ax.plot(model_r[:-1], maz)
ax.plot(model_r[:-1], -maz, c=upper_az.get_color())
return fig
@_support_units
def plot_pulsar_spin_dist(self, fig=None, ax=None, pulsar_ind=0,
show_obs=True, show_conv=False):
import scipy.interpolate as interp
fig, ax = self._setup_artist(fig, ax)
# pulsars = self.obs['pulsar']
puls_obs = self.obs['pulsar/spin']
id_ = puls_obs['id'][pulsar_ind].value.decode()
ax.set_title(f'Pulsar "{id_}" Period Derivative Likelihood')
ax.set_ylabel('Probability')
ax.set_xlabel(r'$\dot{P}/P$ $\left[s^{-1}\right]$')
mass_bin = -1
kde = pulsars.field_Pdot_KDE()
Pdot_min, Pdot_max = kde.dataset[1].min(), kde.dataset[1].max()
R = puls_obs['r'][pulsar_ind].to(u.pc)
P = puls_obs['P'][pulsar_ind].to('s')
Pdot_meas = puls_obs['Pdot'][pulsar_ind]
ΔPdot_meas = np.abs(puls_obs['ΔPdot'][pulsar_ind])
PdotP_domain, PdotP_c_prob = pulsars.cluster_component(self.model,
R, mass_bin)
Pdot_domain = (P * PdotP_domain).decompose()
# linear to avoid effects around asymptote
Pdot_c_spl = interp.UnivariateSpline(
Pdot_domain, PdotP_c_prob, k=1, s=0, ext=1
)
err = util.gaussian(x=Pdot_domain, sigma=ΔPdot_meas, mu=0)
err_spl = interp.UnivariateSpline(Pdot_domain, err, k=3, s=0, ext=1)
lg_P = np.log10(P / P.unit)
P_grid, Pdot_int_domain = np.mgrid[lg_P:lg_P:1j, Pdot_min:Pdot_max:200j]
P_grid, Pdot_int_domain = P_grid.ravel(), Pdot_int_domain.ravel()
Pdot_int_prob = kde(np.vstack([P_grid, Pdot_int_domain]))
Pdot_int_spl = interp.UnivariateSpline(
Pdot_int_domain, Pdot_int_prob, k=3, s=0, ext=1
)
Pdot_int_prob = util.RV_transform(
domain=10**Pdot_int_domain, f_X=Pdot_int_spl,
h=np.log10, h_prime=lambda y: (1 / (np.log(10) * y))
)
Pdot_int_spl = interp.UnivariateSpline(
10**Pdot_int_domain, Pdot_int_prob, k=3, s=0, ext=1
)
lin_domain = np.linspace(0., 1e-18, 5_000 // 2)
lin_domain = np.concatenate((np.flip(-lin_domain[1:]), lin_domain))
conv1 = np.convolve(err_spl(lin_domain), Pdot_c_spl(lin_domain), 'same')
conv2 = np.convolve(conv1, Pdot_int_spl(lin_domain), 'same')
# Normalize
conv2 /= interp.UnivariateSpline(
lin_domain, conv2, k=3, s=0, ext=1
).integral(-np.inf, np.inf)
cluster_μ = self.obs.mdata['μ'] << u.Unit("mas/yr")
PdotP_pm = pulsars.shklovskii_component(cluster_μ, self.model.d)
cluster_coords = (self.obs.mdata['b'], self.obs.mdata['l']) * u.deg
PdotP_gal = pulsars.galactic_component(*cluster_coords, D=self.model.d)
x_total = (lin_domain / P) + PdotP_pm + PdotP_gal
ax.plot(x_total, conv2)
if show_conv:
# Will really mess the scaling up, usually
ax.plot(x_total, Pdot_c_spl(lin_domain))
ax.plot(x_total, conv1)
if show_obs:
ax.axvline((Pdot_meas / P).decompose(), c='r', ls=':')
prob_dist = interp.interp1d(
(lin_domain / P) + PdotP_pm + PdotP_gal, conv2,
assume_sorted=True, bounds_error=False, fill_value=0.0
)
print('prob=', prob_dist((Pdot_meas / P).decompose()))
return fig
@_support_units
def plot_pulsar_orbital_dist(self, fig=None, ax=None, pulsar_ind=0,
show_obs=True, show_conv=False):
import scipy.interpolate as interp
fig, ax = self._setup_artist(fig, ax)
# pulsars = self.obs['pulsar']
puls_obs = self.obs['pulsar/orbital']
id_ = puls_obs['id'][pulsar_ind].value.decode()
ax.set_title(f'Pulsar "{id_}" Period Derivative Likelihood')
ax.set_ylabel('Probability')
ax.set_xlabel(r'$\dot{P}/P$ $\left[s^{-1}\right]$')
mass_bin = -1
R = puls_obs['r'][pulsar_ind].to(u.pc)
P = puls_obs['Pb'][pulsar_ind].to('s')
Pdot_meas = puls_obs['Pbdot'][pulsar_ind]
ΔPdot_meas = np.abs(puls_obs['ΔPbdot'][pulsar_ind])
PdotP_domain, PdotP_c_prob = pulsars.cluster_component(self.model,
R, mass_bin)
Pdot_domain = (P * PdotP_domain).decompose()
Pdot_c_spl = interp.UnivariateSpline(
Pdot_domain, PdotP_c_prob, k=1, s=0, ext=1
)
err = util.gaussian(x=Pdot_domain, sigma=ΔPdot_meas, mu=0)
err_spl = interp.UnivariateSpline(Pdot_domain, err, k=3, s=0, ext=1)
lin_domain = np.linspace(0., 1e-11, 5_000 // 2)
lin_domain = np.concatenate((np.flip(-lin_domain[1:]), lin_domain))
conv = np.convolve(err_spl(lin_domain), Pdot_c_spl(lin_domain), 'same')
# conv = np.convolve(err, PdotP_c_prob, 'same')
# Normalize
conv /= interp.UnivariateSpline(
lin_domain, conv, k=3, s=0, ext=1
).integral(-np.inf, np.inf)
cluster_μ = self.obs.mdata['μ'] << u.Unit("mas/yr")
PdotP_pm = pulsars.shklovskii_component(cluster_μ, self.model.d)
cluster_coords = (self.obs.mdata['b'], self.obs.mdata['l']) * u.deg
PdotP_gal = pulsars.galactic_component(*cluster_coords, D=self.model.d)
x_total = (lin_domain / P) + PdotP_pm + PdotP_gal
ax.plot(x_total, conv)
if show_conv:
# Will really mess the scaling up, usually
ax.plot(x_total, PdotP_c_prob)
ax.plot(x_total, conv)
if show_obs:
ax.axvline((Pdot_meas / P).decompose(), c='r', ls=':')
prob_dist = interp.interp1d(
x_total, conv,
assume_sorted=True, bounds_error=False, fill_value=0.0
)
print('prob=', prob_dist((Pdot_meas / P).decompose()))
return fig
@_support_units
def plot_all(self, fig=None, show_obs='attempt'):
'''Plots all the primary profiles (numdens, LOS, PM)
but *not* the mass function, pulsars, or any secondary profiles
(cum-mass, remnants, etc)
'''
fig, axes = self._setup_multi_artist(fig, (3, 2))
axes = axes.reshape((3, 2))
fig.suptitle(str(self.obs))
kw = {}
self.plot_number_density(fig=fig, ax=axes[0, 0], **kw)
self.plot_LOS(fig=fig, ax=axes[1, 0], **kw)
self.plot_pm_ratio(fig=fig, ax=axes[2, 0], **kw)
self.plot_pm_tot(fig=fig, ax=axes[0, 1], **kw)
self.plot_pm_T(fig=fig, ax=axes[1, 1], **kw)
self.plot_pm_R(fig=fig, ax=axes[2, 1], **kw)
for ax in axes.flatten():
ax.set_xlabel('')
return fig
# ----------------------------------------------------------------------
# Mass Function Plotting
# ----------------------------------------------------------------------
@_support_units
def plot_mass_func(self, fig=None, show_obs=True, show_fields=False, *,
colours=None, PI_legend=False, logscaled=False,
field_kw=None):
# ------------------------------------------------------------------
# Setup axes, splitting into two columns if necessary and adding the
# extra ax for the field plot if desired
# ------------------------------------------------------------------
N_rbins = sum([len(d) for d in self.mass_func.values()])
shape = ((int(np.ceil(N_rbins / 2)), int(np.floor(N_rbins / 2))), 2)
# If adding the fields, include an extra column on the left for it
if show_fields:
shape = ((1, *shape[0]), shape[1] + 1)
fig, axes = self._setup_multi_artist(fig, shape, sharex=True)
axes = axes.T.flatten()
ax_ind = 0
# ------------------------------------------------------------------
# If desired, use the `plot_MF_fields` method to show the fields
# ------------------------------------------------------------------
if show_fields:
ax = axes[ax_ind]
if field_kw is None:
field_kw = {}
field_kw.setdefault('radii', [])
# TODO need to figure out a good size and how to do it, for this ax
self.plot_MF_fields(fig, ax, **field_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Iterate over each PI, gathering data to plot
# ------------------------------------------------------------------
for PI in sorted(self.mass_func,
key=lambda k: self.mass_func[k][0]['r1']):
bins = self.mass_func[PI]
# Get data for this PI
mf = self.obs[PI]
mbin_mean = (mf['m1'] + mf['m2']) / 2.
mbin_width = mf['m2'] - mf['m1']
N = mf['N'] / mbin_width
ΔN = mf['ΔN'] / mbin_width
# --------------------------------------------------------------
# Iterate over radial bin dicts for this PI
# --------------------------------------------------------------
for rind, rbin in enumerate(bins):
ax = axes[ax_ind]
clr = rbin.get('colour', None)
# ----------------------------------------------------------
# Plot observations
# ----------------------------------------------------------
if show_obs:
r_mask = ((mf['r1'] == rbin['r1'])
& (mf['r2'] == rbin['r2']))
N_data = N[r_mask].value
err_data = ΔN[r_mask].value
err = self.F * err_data
pnts = ax.errorbar(mbin_mean[r_mask], N_data, yerr=err,
fmt='o', color=clr)
clr = pnts[0].get_color()
# ----------------------------------------------------------
# Plot model. Doesn't utilize the `_plot_profile` method, as
# this is *not* a profile, but does use similar, but simpler,
# logic
# ----------------------------------------------------------
dNdm = rbin['dNdm']
midpoint = dNdm.shape[0] // 2
m_domain = self.mj[:dNdm.shape[-1]]
median = dNdm[midpoint]
med_plot, = ax.plot(m_domain, median, '--', c=clr)
alpha = 0.8 / (midpoint + 1)
for sigma in range(1, midpoint + 1):
ax.fill_between(
m_domain,
dNdm[midpoint + sigma],
dNdm[midpoint - sigma],
alpha=1 - alpha, color=clr
)
alpha += alpha
if logscaled:
ax.set_xscale('log')
ax.set_xlabel(None)
# ----------------------------------------------------------
# "Label" each bin with it's radial bounds.
# Uses fake text to allow for using loc='best' from `legend`.
# Really this should be a part of plt (see matplotlib#17946)
# ----------------------------------------------------------
r1 = rbin['r1'].to_value('arcmin')
r2 = rbin['r2'].to_value('arcmin')
fake = plt.Line2D([], [], label=f"r = {r1:.2f}'-{r2:.2f}'")
handles = [fake]
leg_kw = {'handlelength': 0, 'handletextpad': 0}
# If this is the first bin, also add a PI tag
if PI_legend and not rind and not show_fields:
pi_fake = plt.Line2D([], [], label=PI)
handles.append(pi_fake)
leg_kw['labelcolor'] = ['k', clr]
ax.legend(handles=handles, **leg_kw)
ax_ind += 1
# ------------------------------------------------------------------
# Put labels on subfigs
# ------------------------------------------------------------------
for sf in fig.subfigs[show_fields:]:
sf.supxlabel(r'Mass [$M_\odot$]')
fig.subfigs[show_fields].supylabel('dN/dm')
return fig
@_support_units
def plot_MF_fields(self, fig=None, ax=None, *, radii=("rh",),
cmap=None, grid=True):
'''plot all mass function fields in this observation
'''
import shapely.geometry as geom
fig, ax = self._setup_artist(fig, ax)
# Centre dot
ax.plot(0, 0, 'kx')
# ------------------------------------------------------------------
# Iterate over each PI and it's radial bins
# ------------------------------------------------------------------
for PI, bins in self.mass_func.items():
for rbin in bins:
# ----------------------------------------------------------
# Plot the field using this `Field` slice's own plotting method
# ----------------------------------------------------------
clr = rbin.get("colour", None)
rbin['field'].plot(ax, fc=clr, alpha=0.7, ec='k', label=PI)
# make this label private so it's only added once to legend
PI = f'_{PI}'
# ------------------------------------------------------------------
# If desired, add a "pseudo" grid in the polar projection, at 2
# arcmin intervals, up to the rt
# ------------------------------------------------------------------
# Ensure the gridlines don't affect the axes scaling
ax.autoscale(False)
if grid:
rt = self.rt if hasattr(self, 'rt') else (20 << u.arcmin)
ticks = np.arange(2, rt.to_value('arcmin'), 2)
# make sure this grid matches normal grids
grid_kw = {
'color': plt.rcParams.get('grid.color'),
'linestyle': plt.rcParams.get('grid.linestyle'),
'linewidth': plt.rcParams.get('grid.linewidth'),
'alpha': plt.rcParams.get('grid.alpha'),
'zorder': 0.5
}
for gr in ticks:
circle = np.array(geom.Point(0, 0).buffer(gr).exterior).T
gr_line, = ax.plot(*circle, **grid_kw)
                ax.annotate(f"{gr:.0f}'", xy=(circle[0].max(), 0),
color=grid_kw['color'])
# ------------------------------------------------------------------
# Try to plot the various radii quantities from this model, if desired
# ------------------------------------------------------------------
# TODO for CI this could be a CI of rh, ra, rt actually (60)
for r_type in radii:
# This is to explicitly avoid very ugly exceptions from geom
if r_type not in {'rh', 'ra', 'rt'}:
mssg = f'radii must be one of {{rh, ra, rt}}, not `{r_type}`'
raise TypeError(mssg)
radius = getattr(self, r_type).to_value('arcmin')
circle = np.array(geom.Point(0, 0).buffer(radius).exterior).T
ax.plot(*circle, ls='--')
ax.text(0, circle[1].max(), r_type)
# ------------------------------------------------------------------
# Add plot labels and legends
# ------------------------------------------------------------------
ax.set_xlabel('RA [arcmin]')
ax.set_ylabel('DEC [arcmin]')
# TODO figure out a better way of handling this always using best? (75)
ax.legend(loc='upper left' if grid else 'best')
return fig
# -----------------------------------------------------------------------
# Model plotting
# -----------------------------------------------------------------------
@_support_units
def plot_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.rho_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.rho_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.rho_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.rho_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.rho_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.rho_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
        ax.set_ylabel(r'Mass Density $[M_\odot / pc^3]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_surface_density(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Surface Mass Density')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.Sigma_tot,
x_unit=x_unit, **kw)
# Total Remnant density
if 'rem' in kind:
kw = {"label": "Remnants", "color": "tab:purple"}
self._plot_profile(ax, None, None, self.Sigma_rem,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.Sigma_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.Sigma_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.Sigma_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.Sigma_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel(rf'Surface Density $[M_\odot / pc^2]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=6,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_cumulative_mass(self, fig=None, ax=None, kind='all', *,
x_unit='pc'):
if kind == 'all':
kind = {'MS', 'tot', 'BH', 'WD', 'NS'}
fig, ax = self._setup_artist(fig, ax)
# ax.set_title('Cumulative Mass')
# Total density
if 'tot' in kind:
kw = {"label": "Total", "color": "tab:cyan"}
self._plot_profile(ax, None, None, self.cum_M_tot,
x_unit=x_unit, **kw)
# Main sequence density
if 'MS' in kind:
kw = {"label": "Main-sequence stars", "color": "tab:orange"}
self._plot_profile(ax, None, None, self.cum_M_MS,
x_unit=x_unit, **kw)
if 'WD' in kind:
kw = {"label": "White Dwarfs", "color": "tab:green"}
self._plot_profile(ax, None, None, self.cum_M_WD,
x_unit=x_unit, **kw)
if 'NS' in kind:
kw = {"label": "Neutron Stars", "color": "tab:red"}
self._plot_profile(ax, None, None, self.cum_M_NS,
x_unit=x_unit, **kw)
# Black hole density
if 'BH' in kind:
kw = {"label": "Black Holes", "color": "tab:gray"}
self._plot_profile(ax, None, None, self.cum_M_BH,
x_unit=x_unit, **kw)
ax.set_yscale("log")
ax.set_xscale("log")
# ax.set_ylabel(rf'$M_{{enc}} ({self.cum_M_tot.unit})$')
ax.set_ylabel(rf'$M_{{enc}}$ $[M_\odot]$')
# ax.set_xlabel('arcsec')
# ax.legend()
fig.legend(loc='upper center', ncol=5,
bbox_to_anchor=(0.5, 1.), fancybox=True)
return fig
@_support_units
def plot_remnant_fraction(self, fig=None, ax=None, *, x_unit='pc'):
        '''Fraction of mass in remnants vs MS stars, like in Baumgardt'''
fig, ax = self._setup_artist(fig, ax)
ax.set_title("Remnant Fraction")
ax.set_xscale("log")
self._plot_profile(ax, None, None, self.frac_M_MS,
x_unit=x_unit, label="Main-sequence stars")
self._plot_profile(ax, None, None, self.frac_M_rem,
x_unit=x_unit, label="Remnants")
ax.set_ylabel(r"Mass fraction $M_{MS}/M_{tot}$, $M_{remn.}/M_{tot}$")
ax.set_ylim(0.0, 1.0)
ax.legend()
return fig
# --------------------------------------------------------------------------
# Visualizers
# --------------------------------------------------------------------------
class ModelVisualizer(_ClusterVisualizer):
'''
class for making, showing, saving all the plots related to a single model
'''
@classmethod
def from_chain(cls, chain, observations, method='median'):
'''
        create a Visualizer instance based on a chain, by taking the median
of the chain parameters
'''
reduc_methods = {'median': np.median, 'mean': np.mean}
# if 3d (Niters, Nwalkers, Nparams)
# if 2d (Nwalkers, Nparams)
# if 1d (Nparams)
chain = chain.reshape((-1, chain.shape[-1]))
theta = reduc_methods[method](chain, axis=0)
return cls(Model(theta, observations), observations)
@classmethod
def from_theta(cls, theta, observations):
'''
create a Visualizer instance based on a theta, see `Model` for allowed
theta types
'''
return cls(Model(theta, observations), observations)
def __init__(self, model, observations=None):
self.model = model
self.obs = observations if observations else model.observations
self.rh = model.rh
self.ra = model.ra
self.rt = model.rt
self.F = model.F
self.s2 = model.s2
self.d = model.d
self.r = model.r
self.rlims = (9e-3, self.r.max() + (5 << self.r.unit))
self._2πr = 2 * np.pi * model.r
self.star_bin = model.nms - 1
self.mj = model.mj
self.LOS = np.sqrt(self.model.v2pj)[:, np.newaxis, :]
self.pm_T = np.sqrt(model.v2Tj)[:, np.newaxis, :]
self.pm_R = np.sqrt(model.v2Rj)[:, np.newaxis, :]
self.pm_tot = np.sqrt(0.5 * (self.pm_T**2 + self.pm_R**2))
self.pm_ratio = self.pm_T / self.pm_R
self._init_numdens(model, observations)
self._init_massfunc(model, observations)
self._init_surfdens(model, observations)
self._init_dens(model, observations)
self._init_mass_frac(model, observations)
self._init_cum_mass(model, observations)
    # TODO a lot of these init functions could be more homogeneous
@_ClusterVisualizer._support_units
def _init_numdens(self, model, observations):
# TODO make this more robust and cleaner
model_nd = model.Sigmaj / model.mj[:, np.newaxis]
nd = np.empty(model_nd.shape)[:, np.newaxis, :] << model_nd.unit
# If have nd obs, apply scaling factor K
for mbin in range(model_nd.shape[0]):
try:
obs_nd = observations['number_density']
obs_r = obs_nd['r'].to(model.r.unit)
nd_interp = util.QuantitySpline(model.r, model_nd[mbin, :])
K = (np.nansum(obs_nd['Σ'] * nd_interp(obs_r) / obs_nd['Σ']**2)
/ np.nansum(nd_interp(obs_r)**2 / obs_nd['Σ']**2))
except KeyError:
K = 1
nd[mbin, 0, :] = K * model_nd[mbin, :]
self.numdens = nd
@_ClusterVisualizer._support_units
def _init_massfunc(self, model, observations, *, cmap=None):
'''
sets self.mass_func as a dict of PI's, where each PI has a list of
subdicts. Each subdict represents a single radial slice (within this PI)
and contains the radii, the mass func values, and the field slice
'''
cmap = cmap or plt.cm.rainbow
self.mass_func = {}
cen = (observations.mdata['RA'], observations.mdata['DEC'])
PI_list = observations.filter_datasets('*mass_function*')
densityj = [util.QuantitySpline(model.r, model.Sigmaj[j])
for j in range(model.nms)]
for i, (key, mf) in enumerate(PI_list.items()):
self.mass_func[key] = []
# TODO same colour for each PI or different for each slice?
clr = cmap(i / len(PI_list))
field = mass.Field.from_dataset(mf, cen=cen)
rbins = np.unique(np.c_[mf['r1'], mf['r2']], axis=0)
rbins.sort(axis=0)
for r_in, r_out in rbins:
this_slc = {'r1': r_in, 'r2': r_out}
field_slice = field.slice_radially(r_in, r_out)
this_slc['field'] = field_slice
this_slc['colour'] = clr
this_slc['dNdm'] = np.empty((1, model.nms))
sample_radii = field_slice.MC_sample(300).to(u.pc)
for j in range(model.nms):
Nj = field_slice.MC_integrate(densityj[j], sample_radii)
widthj = (model.mj[j] * model.mes_widths[j])
this_slc['dNdm'][0, j] = (Nj / widthj).value
self.mass_func[key].append(this_slc)
@_ClusterVisualizer._support_units
def _init_dens(self, model, observations):
shp = (np.newaxis, np.newaxis, slice(None))
self.rho_tot = np.sum(model.rhoj, axis=0)[shp]
self.rho_MS = np.sum(model.rhoj[model._star_bins], axis=0)[shp]
self.rho_rem = np.sum(model.rhoj[model._remnant_bins], axis=0)[shp]
        self.rho_BH = np.sum(model.BH_rhoj, axis=0)[shp]
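        # --- Hedged completion (not in the original excerpt): the plotting
        # methods above also reference per-class densities for white dwarfs and
        # neutron stars; assuming the model exposes `_WD_bins` and `_NS_bins`
        # index arrays analogous to `_star_bins`, they could be built the same way ---
        self.rho_WD = np.sum(model.rhoj[model._WD_bins], axis=0)[shp]
        self.rho_NS = np.sum(model.rhoj[model._NS_bins], axis=0)[shp]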
import numpy as np
from scipy.interpolate import CubicSpline
class WaypointTraj(object):
"""
"""
def __init__(self, points):
"""
This is the constructor for the Trajectory object. A fresh trajectory
object will be constructed before each mission. For a waypoint
trajectory, the input argument is an array of 3D destination
coordinates. You are free to choose the times of arrival and the path
taken between the points in any way you like.
You should initialize parameters and pre-compute values such as
polynomial coefficients here.
Inputs:
points, (N, 3) array of N waypoint coordinates in 3D
"""
self.v = 2 #m/s
self.points = points
self.t = np.zeros(len(points),)
if np.shape(self.points) == (3,) or np.shape(self.points) == (1,3):
pass
        else:
for i in range(len(self.t)-1):
self.t[(i+1)] = np.linalg.norm((points[(i+1)]-points[i]))/self.v
self.point_t = np.zeros(len(points),)
for i in range(int(len(self.t)-1)):
self.point_t[(i+1)] = self.point_t[i] + self.t[i+1]
self.f = CubicSpline(self.point_t,self.points,axis = 0)
def update(self, t):
"""
Given the present time, return the desired flat output and derivatives.
Inputs
t, time, s
Outputs
flat_output, a dict describing the present desired flat outputs with keys
x, position, m
x_dot, velocity, m/s
x_ddot, acceleration, m/s**2
x_dddot, jerk, m/s**3
x_ddddot, snap, m/s**4
yaw, yaw angle, rad
yaw_dot, yaw rate, rad/s
"""
x = np.zeros((3,))
x_dot = np.zeros((3,))
x_ddot = np.zeros((3,))
x_dddot = np.zeros((3,))
        x_ddddot = np.zeros((3,))
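        yaw = 0.0
        yaw_dot = 0.0
        # --- Hedged completion (not in the original excerpt): evaluate the
        # precomputed spline when one exists, otherwise hover at the single
        # waypoint; clamp to the final waypoint once the trajectory is done ---
        if np.shape(self.points) == (3,) or np.shape(self.points) == (1, 3):
            x = np.reshape(self.points, (3,))
        elif t >= self.point_t[-1]:
            x = self.points[-1]
        else:
            x = self.f(t)
            x_dot = self.f(t, 1)   # first derivative of the cubic spline
            x_ddot = self.f(t, 2)  # second derivative of the cubic spline
        flat_output = {'x': x, 'x_dot': x_dot, 'x_ddot': x_ddot, 'x_dddot': x_dddot,
                       'x_ddddot': x_ddddot, 'yaw': yaw, 'yaw_dot': yaw_dot}
        return flat_output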
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2011, <NAME>; 2012, <NAME>;
# 2014-2017, <NAME>;
# All rights reserved.
#
# This file is part of Cinfony and ODDT.
# The contents are covered by the terms of the BSD license
# which is included in the file LICENSE_BSD.txt.
"""
rdkit - A Cinfony module for accessing the RDKit from CPython
Global variables:
Chem and AllChem - the underlying RDKit Python bindings
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
from __future__ import print_function
import os
import gzip
from base64 import b64encode
from itertools import combinations
import warnings
from six import BytesIO, PY3
import numpy as np
from sklearn.utils.deprecation import deprecated
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem, Draw
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import Descriptors
from rdkit import RDConfig
import rdkit.DataStructs
import rdkit.Chem.MACCSkeys
import rdkit.Chem.AtomPairs.Pairs
import rdkit.Chem.AtomPairs.Torsions
# ODDT #
from rdkit.Chem.Lipinski import NumRotatableBonds
from rdkit.Chem.AllChem import ComputeGasteigerCharges
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
from rdkit.Chem import CanonicalRankAtoms
from oddt.toolkits.common import detect_secondary_structure, canonize_ring_path
from oddt.toolkits.extras.rdkit import (_sybyl_atom_type,
MolFromPDBBlock,
MolToPDBQTBlock,
MolFromPDBQTBlock)
_descDict = dict(Descriptors.descList)
backend = 'rdk'
__version__ = rdkit.__version__
image_backend = 'png' # png or svg
image_size = (200, 200)
try:
if get_ipython().config:
ipython_notebook = True
else:
ipython_notebook = False
except NameError:
ipython_notebook = False
elementtable = Chem.GetPeriodicTable()
SMARTS_DEF = {
'rot_bond': '[!$(*#*)&!D1&!$(C(F)(F)F)&'
'!$(C(Cl)(Cl)Cl)&'
'!$(C(Br)(Br)Br)&'
'!$(C([CH3])([CH3])[CH3])&'
'!$([CD3](=[N,O,S])-!@[#7,O,S!D1])&'
'!$([#7,O,S!D1]-!@[CD3]=[N,O,S])&'
'!$([CD3](=[N+])-!@[#7!D1])&'
'!$([#7!D1]-!@[CD3]=[N+])]-!@[!$(*#*)&'
'!D1&!$(C(F)(F)F)&'
'!$(C(Cl)(Cl)Cl)&'
'!$(C(Br)(Br)Br)&'
'!$(C([CH3])([CH3])[CH3])]'
}
fps = ['rdkit', 'layered', 'maccs', 'atompairs', 'torsions', 'morgan']
"""A list of supported fingerprint types"""
descs = list(_descDict.keys())
"""A list of supported descriptors"""
_formats = {'smi': "SMILES",
'can': "Canonical SMILES",
'mol': "MDL MOL file",
'mol2': "Tripos MOL2 file",
'sdf': "MDL SDF file",
'inchi': "InChI",
'inchikey': "InChIKey"}
_notinformats = ['can', 'inchikey']
_notoutformats = ['mol2']
if not Chem.INCHI_AVAILABLE:
_notinformats += ['inchi']
_notoutformats += ['inchi', 'inchikey']
informats = dict([(_x, _formats[_x]) for _x in _formats if _x not in _notinformats])
"""A dictionary of supported input formats"""
outformats = dict([(_x, _formats[_x]) for _x in _formats if _x not in _notoutformats])
"""A dictionary of supported output formats"""
base_feature_factory = AllChem.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'))
""" Global feature factory based on BaseFeatures.fdef """
_forcefields = {'uff': AllChem.UFFOptimizeMolecule,
'mmff94': AllChem.MMFFOptimizeMolecule}
forcefields = list(_forcefields.keys())
"""A list of supported forcefields"""
def _filereader_mol2(filename, lazy=False, **kwargs):
block = ''
data = ''
n = 0
with gzip.open(filename, 'rb') if filename.split('.')[-1] == 'gz' else open(filename, 'rb') as f:
for line in f:
line = line.decode('ascii')
if line[:1] == '#':
data += line
elif line[:17] == '@<TRIPOS>MOLECULE':
                if n > 0: # skip `zero` molecule (any preceding comments and spaces)
if lazy:
yield Molecule(source={'fmt': 'mol2', 'string': block, 'kwargs': kwargs})
else:
yield readstring('mol2', block, **kwargs)
n += 1
block = data
data = ''
block += line
# open last molecule
if block:
if lazy:
yield Molecule(source={'fmt': 'mol2', 'string': block, 'kwargs': kwargs})
else:
yield readstring('mol2', block, **kwargs)
def _filereader_sdf(filename, lazy=False, **kwargs):
block = ''
n = 0
with gzip.open(filename, 'rb') if filename.split('.')[-1] == 'gz' else open(filename, 'rb') as f:
if lazy:
for line in f:
line = line.decode('ascii')
block += line
if line[:4] == '$$$$':
yield Molecule(source={'fmt': 'sdf', 'string': block, 'kwargs': kwargs})
n += 1
block = ''
if block: # open last molecule if any
yield Molecule(source={'fmt': 'sdf', 'string': block, 'kwargs': kwargs})
else:
for mol in Chem.ForwardSDMolSupplier(f, **kwargs):
yield Molecule(mol)
def _filereader_pdb(filename, lazy=False, opt=None, **kwargs):
block = ''
n = 0
with gzip.open(filename, 'rb') if filename.split('.')[-1] == 'gz' else open(filename, 'rb') as f:
for line in f:
line = line.decode('ascii')
block += line
if line[:6] == 'ENDMDL':
if lazy:
yield Molecule(source={'fmt': 'pdb', 'string': block, 'opt': opt, 'kwargs': kwargs})
else:
yield readstring('pdb', block, **kwargs)
n += 1
block = ''
if block: # open last molecule if any
if lazy:
yield Molecule(source={'fmt': 'pdb', 'string': block, 'opt': opt, 'kwargs': kwargs})
else:
yield readstring('pdb', block, **kwargs)
def _filereader_pdbqt(filename, lazy=False, opt=None, **kwargs):
block = ''
n = 0
with gzip.open(filename, 'rb') if filename.split('.')[-1] == 'gz' else open(filename, 'rb') as f:
for line in f:
line = line.decode('ascii')
block += line
if line[:6] == 'ENDMDL':
if lazy:
yield Molecule(source={'fmt': 'pdbqt', 'string': block, 'opt': opt, 'kwargs': kwargs})
else:
yield readstring('pdbqt', block, **kwargs)
n += 1
block = ''
if block: # open last molecule if any
if lazy:
yield Molecule(source={'fmt': 'pdbqt', 'string': block, 'opt': opt, 'kwargs': kwargs})
else:
yield readstring('pdbqt', block, **kwargs)
def readfile(format, filename, lazy=False, opt=None, **kwargs):
"""Iterate over the molecules in a file.
Required parameters:
format - see the informats variable for a list of available
input formats
filename
You can access the first molecule in a file using the next() method
of the iterator:
mol = next(readfile("smi", "myfile.smi"))
You can make a list of the molecules in a file using:
mols = list(readfile("smi", "myfile.smi"))
You can iterate over the molecules in a file as shown in the
following code snippet:
>>> atomtotal = 0
>>> for mol in readfile("sdf", "head.sdf"):
... atomtotal += len(mol.atoms)
...
>>> print(atomtotal)
43
"""
if not os.path.isfile(filename):
raise IOError("No such file: '%s'" % filename)
format = format.lower()
# Eagerly evaluate the supplier functions in order to report
# errors in the format and errors in opening the file.
# Then switch to an iterator...
if format in ["sdf", "mol"]:
return _filereader_sdf(filename, lazy=lazy, **kwargs)
elif format == "pdb":
return _filereader_pdb(filename, lazy=lazy, **kwargs)
elif format == "pdbqt":
return _filereader_pdbqt(filename, lazy=lazy, **kwargs)
elif format == "mol2":
return _filereader_mol2(filename, lazy=lazy, **kwargs)
elif format == "smi":
iterator = Chem.SmilesMolSupplier(filename, delimiter=" \t",
titleLine=False, **kwargs)
def smi_reader():
for mol in iterator:
yield Molecule(mol)
return smi_reader()
elif format == 'inchi' and Chem.INCHI_AVAILABLE:
def inchi_reader():
for line in open(filename):
mol = Chem.inchi.MolFromInchi(line.strip(), **kwargs)
yield Molecule(mol)
return inchi_reader()
else:
raise ValueError("%s is not a recognised RDKit format" % format)
def readstring(format, string, **kwargs):
"""Read in a molecule from a string.
Required parameters:
format - see the informats variable for a list of available
input formats
string
Example:
>>> input = "C1=CC=CS1"
>>> mymol = readstring("smi", input)
>>> len(mymol.atoms)
5
"""
string = str(string)
format = format.lower()
if format in ["mol", "sdf"]:
supplier = Chem.SDMolSupplier(**kwargs)
supplier.SetData(string)
mol = next(supplier)
del supplier
elif format == "mol2":
mol = Chem.MolFromMol2Block(string, **kwargs)
elif format == "pdb":
mol = MolFromPDBBlock(string, **kwargs)
elif format == 'pdbqt':
mol = MolFromPDBQTBlock(string, **kwargs)
elif format == "smi":
s = string.strip().split('\n')[0].strip().split()
mol = Chem.MolFromSmiles(s[0], **kwargs)
if mol:
mol.SetProp("_Name", ' '.join(s[1:]))
elif format == 'inchi' and Chem.INCHI_AVAILABLE:
mol = Chem.inchi.MolFromInchi(string, **kwargs)
else:
raise ValueError("%s is not a recognised RDKit format" % format)
return Molecule(mol)
class Outputfile(object):
"""Represent a file to which *output* is to be sent.
Required parameters:
format - see the outformats variable for a list of available
output formats
filename
Optional parameters:
       overwrite -- if the output file already exists, should it
be overwritten? (default is False)
Methods:
write(molecule)
close()
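    Example (a hedged usage sketch, not part of the original documentation):
       out = Outputfile("sdf", "ligands.sdf", overwrite=True)
       out.write(readstring("smi", "CCO"))
       out.close()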
"""
def __init__(self, format, filename, overwrite=False, **kwargs):
self.format = format
self.filename = filename
if not overwrite and os.path.isfile(self.filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % self.filename)
if format == "sdf":
self._writer = Chem.SDWriter(self.filename, **kwargs)
elif format == "smi":
self._writer = Chem.SmilesWriter(self.filename, isomericSmiles=True, includeHeader=False, **kwargs)
elif format in ('inchi', 'inchikey') and Chem.INCHI_AVAILABLE:
self._writer = open(filename, 'w')
elif format in ('mol2', 'pdbqt'):
self._writer = gzip.open(filename, 'w') if filename.split('.')[-1] == 'gz' else open(filename, 'w')
elif format == "pdb":
self._writer = Chem.PDBWriter(self.filename)
else:
raise ValueError("%s is not a recognised RDKit format" % format)
self.total = 0 # The total number of molecules written to the file
self.writer_kwargs = kwargs
def write(self, molecule):
"""Write a molecule to the output file.
Required parameters:
molecule
"""
if not self.filename:
raise IOError("Outputfile instance is closed.")
if self.format in ('inchi', 'inchikey', 'mol2'):
self._writer.write(molecule.write(self.format, **self.writer_kwargs) + '\n')
        elif self.format == 'pdbqt':
self._writer.write('MODEL %i\n' % (self.total + 1) +
molecule.write(self.format, **self.writer_kwargs) + '\nENDMDL\n')
else:
self._writer.write(molecule.Mol)
self.total += 1
def close(self):
"""Close the Outputfile to further writing."""
self.filename = None
self._writer.flush()
del self._writer
class Molecule(object):
"""Represent an rdkit Molecule.
Required parameter:
Mol -- an RDKit Mol or any type of cinfony Molecule
Attributes:
atoms, data, formula, molwt, title
Methods:
addh(), calcfp(), calcdesc(), draw(), localopt(), make3D(), removeh(),
write()
The underlying RDKit Mol can be accessed using the attribute:
Mol
"""
_cinfony = True
def __new__(cls, Mol=-1, source=None, *args, **kwargs):
""" Trap RDKit molecules which are 'None' """
if Mol is None and source is None:
return None
else:
return super(Molecule, cls).__new__(cls)
def __init__(self, Mol=None, source=None, protein=False):
if Mol and not isinstance(Mol, (Molecule, Chem.Mol)):
raise ValueError('Mol needs to be ODDT or RDKit molecule instance')
if hasattr(Mol, "_cinfony"):
a, b = Mol._exchange
if a == 0:
molecule = readstring("smi", b)
else:
molecule = readstring("mol", b)
Mol = molecule.Mol
self.Mol = Mol
# ODDT #
self._protein = protein
# caches
self._atom_dict = None
self._res_dict = None
self._ring_dict = None
self._coords = None
self._charges = None
self._residues = None
# lazy
self._source = source # dict with keys: n, fmt, string, filename
if Mol is None and not source:
self = None
return None
# lazy Molecule parsing requires masked Mol
@property
def Mol(self):
if not self._Mol and self._source:
kwargs = self._source.get('kwargs', {})
tmp_mol = readstring(self._source['fmt'], self._source['string'], **kwargs)
if tmp_mol is None:
self = None
return None
else:
self._Mol = tmp_mol.Mol
self._source = None
return self._Mol
@Mol.setter
def Mol(self, value):
self._Mol = value
@property
def atoms(self):
return AtomStack(self.Mol)
@property
def data(self):
return MoleculeData(self.Mol)
@property
def molwt(self):
return Descriptors.MolWt(self.Mol)
@property
def formula(self):
return Descriptors.MolecularFormula(self.Mol)
def _gettitle(self):
# Note to self: maybe should implement the get() method for self.data
if "_Name" in self.data:
return self.data["_Name"]
else:
return ""
def _settitle(self, val):
self.Mol.SetProp("_Name", val)
title = property(_gettitle, _settitle)
@property
def _exchange(self):
if self.Mol.GetNumConformers() == 0:
return (0, self.write("smi"))
else:
return (1, self.write("mol"))
# cache frequently used properties and cache them in prefixed [_] variables
@property
def coords(self):
if self._coords is None:
self._coords = np.array([atom.coords for atom in self.atoms], dtype=np.float32)
self._coords.setflags(write=False)
return self._coords
@coords.setter
def coords(self, new):
        new = np.asarray(new, dtype=np.float64)
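        # --- Hedged completion (not in the original excerpt): write the new
        # coordinates back into the underlying RDKit conformer and drop the cache ---
        from rdkit.Geometry import Point3D  # local import to keep the sketch self-contained
        conf = self.Mol.GetConformer()
        for idx in range(self.Mol.GetNumAtoms()):
            x, y, z = map(float, new[idx])
            conf.SetAtomPosition(idx, Point3D(x, y, z))
        self._coords = None  # invalidate the cached copy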
from __future__ import print_function, division
import torch
import os
from os.path import exists, join, basename
from skimage import io
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from geotnf.transformation import GeometricTnf
from torch.autograd import Variable
from geotnf.transformation import homography_mat_from_4_pts
class SynthDataset(Dataset):
"""
Synthetically transformed pairs dataset for training with strong supervision
Args:
csv_file (string): Path to the csv file with image names and transformations.
training_image_path (string): Directory with all the images.
transform (callable): Transformation for post-processing the training pair (eg. image normalization)
Returns:
Dict: {'image': full dataset image, 'theta': desired transformation}
"""
def __init__(self,
dataset_csv_path,
dataset_csv_file,
dataset_image_path,
output_size=(480,640),
geometric_model='affine',
dataset_size=0,
transform=None,
random_sample=False,
random_t=0.5,
random_s=0.5,
random_alpha=1/6,
random_t_tps=0.4,
four_point_hom=True):
self.out_h, self.out_w = output_size
# read csv file
self.train_data = pd.read_csv(os.path.join(dataset_csv_path,dataset_csv_file))
self.random_sample = random_sample
self.random_t = random_t
self.random_t_tps = random_t_tps
self.random_alpha = random_alpha
self.random_s = random_s
self.four_point_hom = four_point_hom
self.dataset_size = dataset_size
if dataset_size!=0:
dataset_size = min((dataset_size,len(self.train_data)))
self.train_data = self.train_data.iloc[0:dataset_size,:]
self.img_names = self.train_data.iloc[:,0]
if self.random_sample==False:
            self.theta_array = self.train_data.iloc[:, 1:].values.astype('float')
# copy arguments
self.dataset_image_path = dataset_image_path
self.transform = transform
self.geometric_model = geometric_model
self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
def __len__(self):
return len(self.train_data)
def __getitem__(self, idx):
if self.random_sample and self.dataset_size==1:
np.random.seed(1) # for debugging purposes
# read image
img_name = os.path.join(self.dataset_image_path, self.img_names[idx])
image = io.imread(img_name)
# read theta
if self.random_sample==False:
theta = self.theta_array[idx, :]
if self.geometric_model=='affine':
# reshape theta to 2x3 matrix [A|t] where
# first row corresponds to X and second to Y
# theta = theta[[0,1,4,2,3,5]].reshape(2,3)
theta = theta[[3,2,5,1,0,4]] #.reshape(2,3)
if self.geometric_model=='tps':
theta = np.expand_dims(np.expand_dims(theta,1),2)
if self.geometric_model=='afftps':
theta[[0,1,2,3,4,5]] = theta[[3,2,5,1,0,4]]
else:
if self.geometric_model=='affine' or self.geometric_model=='afftps':
rot_angle = (np.random.rand(1)-0.5)*2*np.pi/12; # between -np.pi/12 and np.pi/12
sh_angle = (np.random.rand(1)-0.5)*2*np.pi/6; # between -np.pi/6 and np.pi/6
lambda_1 = 1+(2*np.random.rand(1)-1)*0.25; # between 0.75 and 1.25
lambda_2 = 1+(2*np.random.rand(1)-1)*0.25; # between 0.75 and 1.25
tx=(2*np.random.rand(1)-1)*0.25; # between -0.25 and 0.25
ty=(2*np.random.rand(1)-1)*0.25;
R_sh = np.array([[np.cos(sh_angle[0]),-np.sin(sh_angle[0])],
                                 [np.sin(sh_angle[0]), np.cos(sh_angle[0])]])
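                # --- Hedged sketch (not in the original excerpt): compose the sampled
                # affine parameters into a theta vector in the same layout used above;
                # random sampling for the 'tps' and 'hom' models is omitted here ---
                R_alpha = np.array([[np.cos(rot_angle[0]), -np.sin(rot_angle[0])],
                                    [np.sin(rot_angle[0]), np.cos(rot_angle[0])]])
                D = np.diag([lambda_1[0], lambda_2[0]])
                A = R_alpha @ R_sh.transpose() @ D @ R_sh
                theta = np.array([A[0, 0], A[0, 1], tx[0], A[1, 0], A[1, 1], ty[0]])
        # convert the image and theta to tensors and apply any post-processing transform
        image = torch.Tensor(image.astype(np.float32))
        theta = torch.Tensor(theta.astype(np.float32))
        sample = {'image': image, 'theta': theta}
        if self.transform:
            sample = self.transform(sample)
        return sample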
"""
tools related to processing SUNTANS output for DWAQ input.
requires instrument version of SUNTANS code for the flux
integration
"""
import glob
import numpy as np
import os
from ... import utils
from ..suntans import sunreader
from ...io import qnc
from . import waq_scenario
from ..suntans import forcing
from ...spatial import wkb2shp
from ...grid import unstructured_grid
def sun_to_flowgeom(sun,proc,filename,overwrite=True):
"""
given a SunReader object, write the 2-D grid for the given
processor out to the a dwaq-compatible xxxx_flowgeom.nc
file.
overwrite: if True, silently overwrite existing output file.
tries to write enough info to recreate the grid
"""
# Figure out some ownership:
g=sun.grid(proc) # processor-local grid
gg=sun.grid() # global grid
# Cell ownership
is_local=np.zeros(g.Ncells(),'b1')
is_local[ sun.proc_nonghost_cells(proc) ] = True
g2l=sun.map_global_cells_to_local_cells(allow_cache=False,honor_ghosts=True)
l2g=sun.map_local_to_global(proc)
my_cell_procs=g2l['proc'][l2g] # map local cell index to home processor
assert np.all( (my_cell_procs==proc) == is_local )
# Edge ownership - note that marker 6 edges are not output!
# and marker 5 edges are given to the lower numbered processor
edge_marks=g.edges[:,2]
# edges which will be output - skip closed and super-ghosty edges, but
# include shared edges, flow, open boundaries
edge_sel= (edge_marks != 6) & (edge_marks!=1)
bdry_edges=(edge_marks>0)&(edge_marks<4) # non ghost edges which have only 1 cell nbr
edge_cells=g.edges[edge_sel,3:] # neighbors of edges to be output
edge_cells[ bdry_edges[edge_sel],1 ] = edge_cells[ bdry_edges[edge_sel], 0]
assert np.all(edge_cells>=0)
edge_owners=g2l['proc'][l2g[edge_cells].min(axis=1)]
cdata=sun.celldata(proc)
nc=qnc.empty(fn=filename, # os.path.join(dwaq_dir,'%04d_flowgeom.nc'%proc),
overwrite=overwrite,
# DWAQ requires netcdf3
format='NETCDF3_CLASSIC')
mesh_name='FlowMesh' # for UGRID references
nc.createDimension('nFlowElem',g.Ncells())
nc.createDimension('nFlowElemMaxNode',3)
# other dimensions created on demand.
# cell centers
nc['FlowElem_xcc']['nFlowElem']=cdata[:,0]
nc.FlowElem_xcc.units='m'
nc.FlowElem_xcc.standard_name='projection_x_coordinate'
nc.FlowElem_xcc.long_name="Flow element circumcenter x"
nc.FlowElem_xcc.bounds='FlowElemContour_x' # ?
nc.FlowElem_xcc.grid_mapping='projected_coordinate_system'
nc['FlowElem_ycc']['nFlowElem']=cdata[:,1]
nc.FlowElem_ycc.units='m'
nc.FlowElem_ycc.standard_name='projection_y_coordinate'
nc.FlowElem_ycc.long_name="Flow element circumcenter y"
nc.FlowElem_ycc.bounds='FlowElemContour_y' # ?
nc.FlowElem_ycc.grid_mapping='projected_coordinate_system'
nc['FlowElem_zcc']['nFlowElem']=cdata[:,3]
nc.FlowElem_zcc.long_name="Flow element average bottom level (average of all corners)."
nc.FlowElem_zcc.positive='down'
nc.FlowElem_zcc.mesh=mesh_name
nc.FlowElem_zcc.location='face'
nc['FlowElem_bac']['nFlowElem']=cdata[:,2]
nc.FlowElem_bac.long_name="Flow element area"
nc.FlowElem_bac.units='m2'
nc.FlowElem_bac.standard_name='cell_area'
nc.FlowElem_bac.mesh=mesh_name
nc.FlowElem_bac.location='face'
nc['FlowElemContour_x']['nFlowElem','nFlowElemContourPts'] = g.points[g.cells[:,:],0]
nc.FlowElemContour_x.units='m'
nc.FlowElemContour_x.standard_name="projection_x_coordinate"
nc.FlowElemContour_x.long_name="List of x-points forming flow element"
nc.FlowElemContour_x.grid_mapping='projected_coordinate_system'
nc['FlowElemContour_y']['nFlowElem','nFlowElemContourPts'] = g.points[g.cells[:,:],1]
nc.FlowElemContour_y.units='m'
nc.FlowElemContour_y.standard_name="projection_y_coordinate"
nc.FlowElemContour_y.long_name="List of y-points forming flow element"
nc.FlowElemContour_y.grid_mapping='projected_coordinate_system'
# not sure how this differs from zcc, aside from sign.
nc['FlowElem_bl']['nFlowElem']=-cdata[:,3]
nc.FlowElem_bl.units='m'
nc.FlowElem_bl.positive='up'
nc.FlowElem_bl.standard_name='sea_floor_depth'
nc.FlowElem_bl.long_name="Bottom level at flow element's circumcenter."
nc.FlowElem_bl.grid_mapping='projected_coordinate_system'
nc.FlowElem_bl.mesh=mesh_name
nc.FlowElem_bl.location='face'
# should include flow/open boundaries. just not closed boundaries.
links=1+g.edges[edge_sel,3:5] # to 1-based
bdry=links<=0
nelt=len(nc.FlowElem_xcc)
# in .poi files, boundaries are negative, but here, they are appended to
    # the regular elements
links[bdry] = 1+np.arange(np.sum(bdry))
nc['FlowLink']['nFlowLink','nFlowLinkPts']=links.astype(np.int32)
nc.FlowLink.long_name="link/interface between two flow elements"
nc['FlowLinkType']['nFlowLink']=(2*np.ones(links.shape[0])).astype(np.int32)
nc.FlowLinkType.long_name="type of flowlink"
nc.FlowLinkType.valid_range=[1,2]
nc.FlowLinkType.flag_values=[1,2]
nc.FlowLinkType.flag_meanings="link_between_1D_flow_elements link_between_2D_flow_elements"
ec=g.edge_centers()[edge_sel]
nc['FlowLink_xu']['nFlowLink']=ec[:,0]
nc.FlowLink_xu.units='m'
nc.FlowLink_xu.standard_name='projection_x_coordinate'
nc.FlowLink_xu.long_name='Center coordinate of net link (velocity point).'
nc.FlowLink_xu.grid_mapping='projected_coordinate_system'
nc['FlowLink_yu']['nFlowLink']=ec[:,1]
nc.FlowLink_yu.units='m'
nc.FlowLink_yu.standard_name='projection_y_coordinate'
nc.FlowLink_yu.long_name='Center coordinate of net link (velocity point).'
nc.FlowLink_yu.grid_mapping='projected_coordinate_system'
# for now, skip lat/lon fields, projection definition..
if 0:
# single processor only
nc['FlowElemDomain']['nFlowElem']=(proc*np.ones(g.Ncells())).astype(np.int16)
nc['FlowLinkDomain']['nFlowLink']=(proc*np.ones(np.sum(edge_sel))).astype(np.int16)
else:
# single or multiple processors
nc['FlowElemDomain']['nFlowElem']=my_cell_procs.astype(np.int16)
nc['FlowLinkDomain']['nFlowLink']=edge_owners.astype(np.int16)
nc.FlowElemDomain.long_name="Domain number of flow element"
nc.FlowLinkDomain.long_name="Domain number of flow link"
# used to do silly thing with closest_cell() which isn't robust.
nc['FlowElemGlobalNr']['nFlowElem']=1+l2g
nc.FlowElemGlobalNr.long_name="Global flow element numbering"
#---- UGRID-ish metadata and supplementals ----
mesh=nc.createVariable(mesh_name,'i4')
mesh.cf_role='mesh_topology'
mesh.long_name = "Topology data of 2D unstructured mesh"
mesh.dimension = 2
nc['Node_x']['nNode'] = g.points[:,0]
nc['Node_y']['nNode'] = g.points[:,1]
mesh.node_coordinates = "Node_x Node_y"
nc['FlowElemContour_node']['nFlowElem','nFlowElemContourPts'] = g.cells.astype('i4')
face_nodes=nc.FlowElemContour_node
face_nodes.cf_role='face_node_connectivity'
face_nodes.long_name="Maps faces to constituent vertices/nodes"
face_nodes.start_index=0
mesh.face_node_connectivity = 'FlowElemContour_node'
nc['FlowEdge_node']['nFlowEdge','nEdgePts']=g.edges[:,:2].astype('i4')
edge_nodes=nc.FlowEdge_node
edge_nodes.cf_role='edge_node_connectivity'
edge_nodes.long_name="Maps edge to constituent vertices"
edge_nodes.start_index=0
mesh.edge_node_connectivity = 'FlowEdge_node' # attribute required if variables will be defined on edges
# mesh.edge_coordinates = "Mesh2_edge_x Mesh2_edge_y" # optional attribute (requires edge_node_connectivity)
mesh.face_coordinates = "FlowElem_xcc FlowElem_ycc" # optional attribute
# mesh.face_edge_connectivity = "FlowLink" # optional attribute (requires edge_node_connectivity)
mesh.face_face_connectivity = "FlowLink" # optional attribute
z_var_name=z_dim_name="n%s_layers"%mesh_name
# these are a bit fake, as in any given water column the cell with the freesurface
# and the cell with the bed may be truncated
z_bookended = np.concatenate( ([0],-sun.z_levels()) )
nc[z_var_name][z_dim_name] = 0.5*(z_bookended[:-1] + z_bookended[1:])
layers=nc.variables[z_var_name]
layers.standard_name = "ocean_zlevel_coordinate"
layers.long_name = "elevation at layer midpoints"
layers.positive = "up"
layers.units = "meters"
# And add a bounds attribute and variable to cover the distribution of cell interfaces
# note that this doesn't bother with how layers are truncated at the bed or surface
bounds_name = z_var_name+"_bnds"
layers.bounds = bounds_name
bounds=np.concatenate( (z_bookended[:-1,None],
z_bookended[1:,None]),axis=1)
nc[bounds_name][z_dim_name,'d2']=bounds
# Global attributes:
nc.setncattr('institution',"San Francisco Estuary Institute")
nc.setncattr('references',"http://www.deltares.nl")
nc.setncattr('source',"Python/Delft tools, <EMAIL>")
nc.setncattr('history',"Converted from SUNTANS run")
nc.setncattr('Conventions',"CF-1.5:Deltares-0.1")
nc.close()
def postprocess(sun=None,sun_dir=None,force=False):
"""
Take care of any python-side postprocessing of a suntans run.
Namely, this creates the sun_nnnn_flowgeom.nc files.
"""
sun = sun or sunreader.SunReader(sun_dir)
nprocs=sun.num_processors()
dfm_dwaq_path=os.path.join(sun.datadir,'dwaq')
for proc in range(nprocs):
nc_fn=os.path.join(dfm_dwaq_path,
"DFM_DELWAQ_sun_%04d"%proc,
"sun_%04d_flowgeom.nc"%proc)
if force or not os.path.exists( nc_fn ):
sun_to_flowgeom(sun,proc,nc_fn)
class SunHydro(waq_scenario.HydroFiles):
""" specialization for SUNTANS-based hydro.
"""
def __init__(self,sun,flow_shps,*a,**k):
self.sun=sun
self.flow_shps=flow_shps
super(SunHydro,self).__init__(*a,**k)
_bc_groups=None
def group_boundary_elements(self,force=False):
""" map all element ids (0-based) to either -1 (not a boundary)
or a nonnegative id corresponding to contiguous boundary elements.
- why are we grouping elements? shouldn't this be grouping
exchanges? there is an implicit connection of each boundary
exchange to its internal segment, and boundaries are grouped
according to those internal segments. Effectively cannot have
two disparate boundaries going into the same element. Looking
at the grid, this is mostly the case, with the exception of a pair
of small tributaries in Marin.
"""
if force or self._bc_groups is None:
# This part is the same as in waq_scenario
g=self.grid()
if g is None:
return super(SunHydro,self).group_boundary_elements()
self.infer_2d_elements()
poi=self.pointers
bc_sel = (poi[:,0]<0)
            bc_elts = np.unique(self.seg_to_2d_element[ poi[bc_sel,1]-1 ])
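            # --- Hedged sketch (not in the original excerpt): the full method goes on
            # to group these boundary elements (e.g. using the flow_shps polygons);
            # as a minimal placeholder, mark non-boundary elements with -1 and give
            # each boundary element its own group id, then cache the result ---
            groups = np.full(self.n_2d_elements, -1, dtype=np.int32)
            groups[bc_elts] = np.arange(len(bc_elts))
            self._bc_groups = groups
        return self._bc_groups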
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from math import factorial
from astar import astar, create_grid, Node
from dijkstras import shortest_path_mat
from scipy import ndimage
def plot_fr_field_2d(f,delay=None):
for i in range(len(f)):
maxval = np.max(np.max(f[i]))
if maxval > 0:
plt.title("Neuron %d" % i)
plt.imshow(f[i],origin='lower')
if delay:
plt.show(block=False) ; plt.pause(delay)
else:
#plt.show()
plt.savefig('occfr%d.png' % i)
plt.close()
def plot_fr_field_1d(f,delay=None):
for i in range(len(f)):
maxval = np.max(f[i])
if maxval > 0:
plt.title("Neuron %d" % i)
plt.plot(f[i]/maxval)
if delay:
plt.show(block=False) ; plt.pause(delay) ; plt.cla()
else:
plt.show()
plt.close()
def find_closest(A, targets):
inds = np.clip(A.searchsorted(targets), 1, len(A)-1)
left = A[inds-1]
right = A[inds]
return inds-(targets-left < right-targets)
def sum_neighbours(M,i,j):
(h,w) = M.shape
val = 0
if i > 0: val += M[j,i-1]
if i < w-1: val += M[j,i+1]
if j > 0: val += M[j-1,i]
if j < h-1: val += M[j+1,i]
return val
def matmax(M):
(h,w) = M.shape
maxval = -np.inf
maxidx = [-1,-1]
for j in range(h):
for i in range(w):
if M[j,i] > maxval:
maxval = M[j,i]
                maxidx = np.array([j,i])
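    # hedged completion (not in the original excerpt): return both the maximum
    # value and its [row, col] index
    return maxval, maxidx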
# -*- coding: utf-8 -*-
"""
This module enables parameterizing the contributivity measurements to be performed.
"""
from __future__ import print_function
import bisect
import datetime
from itertools import combinations
from math import factorial
from timeit import default_timer as timer
import numpy as np
from loguru import logger
from scipy.stats import norm
from sklearn.linear_model import LinearRegression
from . import multi_partner_learning, constants
class KrigingModel:
def __init__(self, degre, covariance_func):
self.X = np.array([[]])
self.Y = np.array([[]])
self.cov_f = covariance_func
self.degre = degre
self.beta = np.array([[]])
self.H = np.array([[]])
self.K = np.array([[]])
self.invK = np.array([[]])
def fit(self, X, Y):
self.X = X
self.Y = Y
K = np.zeros((len(X), len(X)))
H = np.zeros((len(X), self.degre + 1))
for i, d in enumerate(X):
for j, b in enumerate(X):
K[i, j] = self.cov_f(d, b)
for j in range(self.degre + 1):
H[i, j] = np.sum(d) ** j
self.H = H
        self.K = K
self.invK = np.linalg.inv(K)
Ht_invK_H = H.transpose().dot(self.invK).dot(H)
self.beta = np.linalg.inv(Ht_invK_H).dot(H.transpose()).dot(self.invK).dot(self.Y)
def predict(self, x):
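        # Universal kriging predictor: y(x) = g(x)^T beta + c(x)^T K^{-1} (Y - H beta),
        # where g(x) holds the polynomial trend terms and c(x) the covariances
        # between x and the training points.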
gx = []
for i in range(self.degre + 1):
gx.append(np.sum(x) ** i)
gx = np.array(gx)
cx = []
for i in range(len(self.X)):
cx.append([self.cov_f(self.X[i], x)])
cx = np.array(cx)
pred = gx.transpose().dot(self.beta) + cx.transpose().dot(self.invK).dot(
self.Y - self.H.dot(self.beta)
)
return pred
class Contributivity:
def __init__(self, scenario, name=""):
self.name = name
self.scenario = scenario
nb_partners = len(self.scenario.partners_list)
self.contributivity_scores = np.zeros(nb_partners)
self.scores_std = np.zeros(nb_partners)
self.normalized_scores = np.zeros(nb_partners)
self.computation_time_sec = 0.0
self.first_charac_fct_calls_count = 0
self.charac_fct_values = {(): 0}
self.increments_values = [{} for _ in self.scenario.partners_list]
def __str__(self):
computation_time_sec = str(datetime.timedelta(seconds=self.computation_time_sec))
output = "\n" + self.name + "\n"
output += "Computation time: " + computation_time_sec + "\n"
output += (
"Number of characteristic function computed: "
+ str(self.first_charac_fct_calls_count)
+ "\n"
)
output += f"Contributivity scores: {np.round(self.contributivity_scores, 3)}\n"
output += f"Std of the contributivity scores: {np.round(self.scores_std, 3)}\n"
output += f"Normalized contributivity scores: {np.round(self.normalized_scores, 3)}\n"
return output
def not_twice_characteristic(self, subset):
if len(subset) > 0:
subset = np.sort(subset)
if tuple(subset) not in self.charac_fct_values:
# Characteristic_func(permut) has not been computed yet...
# ... so we compute, store, and return characteristic_func(permut)
self.first_charac_fct_calls_count += 1
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
if len(small_partners_list) > 1:
mpl = self.scenario._multi_partner_learning_approach(self.scenario,
partners_list=small_partners_list,
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
else:
mpl = multi_partner_learning.SinglePartnerLearning(self.scenario,
partner=small_partners_list[0],
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
mpl.fit()
self.charac_fct_values[tuple(subset)] = mpl.history.score
# we add the new increments
for i in range(len(self.scenario.partners_list)):
if i in subset:
subset_without_i = np.delete(subset, np.argwhere(subset == i))
if (
tuple(subset_without_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset_without_i)] = (
self.charac_fct_values[tuple(subset)]
- self.charac_fct_values[tuple(subset_without_i)]
)
else:
subset_with_i = np.sort(np.append(subset, i))
if (
tuple(subset_with_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset)] = (
self.charac_fct_values[tuple(subset_with_i)]
- self.charac_fct_values[tuple(subset)]
)
        # else we return the characteristic_func(permut) that was already computed
return self.charac_fct_values[tuple(subset)]
# %% Generalization of Shapley Value computation
def compute_SV(self):
start = timer()
logger.info("# Launching computation of Shapley Value of all partners")
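        # For reference, the Shapley value of partner i is
        #   SV_i = sum over S subset of N\{i} of |S|! (n-|S|-1)! / n! * (v(S u {i}) - v(S))
        # where v is the characteristic function evaluated via not_twice_characteristic.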
# Initialize list of all players (partners) indexes
partners_count = len(self.scenario.partners_list)
partners_idx = np.arange(partners_count)
# Define all possible coalitions of players
coalitions = [
list(j) for i in range(len(partners_idx)) for j in combinations(partners_idx, i + 1)
]
# For each coalition, obtain value of characteristic function...
# ... i.e.: train and evaluate model on partners part of the given coalition
characteristic_function = []
for coalition in coalitions:
characteristic_function.append(self.not_twice_characteristic(coalition))
# Compute Shapley Value for each partner
# We are using this python implementation: https://github.com/susobhang70/shapley_value
# It requires coalitions to be ordered - see README of https://github.com/susobhang70/shapley_value
list_shapley_value = shapley_value(partners_count, characteristic_function)
# Return SV of each partner
self.name = "Shapley"
self.contributivity_scores = np.array(list_shapley_value)
self.scores_std = np.zeros(len(list_shapley_value))
self.normalized_scores = list_shapley_value / np.sum(list_shapley_value)
end = timer()
self.computation_time_sec = end - start
# %% compute independent raw scores
def compute_independent_scores(self):
start = timer()
logger.info(
"# Launching computation of perf. scores of models trained independently on each partner"
)
# Initialize a list of performance scores
performance_scores = []
# Train models independently on each partner and append perf. score to list of perf. scores
for i in range(len(self.scenario.partners_list)):
performance_scores.append(self.not_twice_characteristic(np.array([i])))
self.name = "Independent scores raw"
self.contributivity_scores = np.array(performance_scores)
self.scores_std = np.zeros(len(performance_scores))
self.normalized_scores = performance_scores / np.sum(performance_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the truncated Monte-carlo method
def truncated_MC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and
a characteristic function using the truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
# Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
while (
t < 100 or t < q ** 2 * v_max / sv_accuracy ** 2
):
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
char_partnerlists[j + 1] = char_partnerlists[j]
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "TMC Shapley"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the truncated Monte-carlo method with a small bias correction
def interpol_TMC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and a characteristic
function using the interpolated truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "ITMCS"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
first = True
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
if first:
size_of_rest = 0
for i in range(j, n):
size_of_rest += len(self.scenario.partners_list[i].y_train)
a = (characteristic_all_partners - char_partnerlists[j]) / size_of_rest
first = False
size_of_S = len(self.scenario.partners_list[j].y_train)
char_partnerlists[j + 1] = char_partnerlists[j] + a * size_of_S
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "ITMCS"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the importance sampling method
def IS_lin(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley value corresponding to a list of partner and \
a characteristic function using the importance sampling method and a linear interpolation model."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
# definition of the original density
def prob(subset):
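                # Shapley weight of a coalition S: the probability that a uniformly random
                # permutation places exactly the members of S before a given partner,
                # i.e. |S|! (n-1-|S|)! / n!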
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute the last and the first increments in performance \
# (they are needed to compute the approximated increments)
characteristic_no_partner = 0
last_increments = []
first_increments = []
for k in range(n):
last_increments.append(
characteristic_all_partners
- self.not_twice_characteristic(np.delete(np.arange(n), k))
)
first_increments.append(
self.not_twice_characteristic(np.array([k]))
- characteristic_no_partner
)
# ## definition of the number of data in all datasets
size_of_I = 0
for partner in self.scenario.partners_list:
size_of_I += len(partner.y_train)
def approx_increment(subset, k):
                assert k not in subset, str(k) + " should not be in " + str(subset)
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
# compute the size of subset : ||subset||
size_of_S = 0
for partner in small_partners_list:
size_of_S += len(partner.y_train)
beta = size_of_S / size_of_I
return (1 - beta) * first_increments[k] + beta * last_increments[k]
# ## compute the renormalization constant of the importance density for all datatsets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
renorms.append(renorm)
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval is below the value of
# sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
for k in range(n):
# generate the new subset (for the increment) with the inverse method
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(list_k, length_combination):
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
if cumSum / renorms[k] > u:
S = np.array(subset)
break
if cumSum / renorms[k] > u:
break
# compute the increment
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
# computed the weight p/g
contributions[t - 1][k] = (
increment * renorms[k] / np.abs(approx_increment(np.array(S), k))
)
v_max = np.max(np.var(contributions, axis=0))
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the regression importance sampling method
def IS_reg(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley value corresponding
to a list of partner and a characteristic function using the
importance sampling method and a regression model."""
start = timer()
n = len(self.scenario.partners_list)
if n < 4:
self.compute_SV()
self.name = "IS_reg Shapley values"
else:
# definition of the original density
def prob(subset):
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute some increments
permutation = np.random.permutation(n)
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
permutation = np.flip(permutation)
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
for k in range(n):
permutation = np.append(permutation[-1], permutation[:-1])
for j in range(n):
self.not_twice_characteristic(permutation[: j + 1])
# do the regressions
# make the datasets
def makedata(subset):
# compute the size of subset : ||subset||
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
size_of_S = 0
for partner in small_partners_list:
size_of_S += len(partner.y_train)
data = [size_of_S, size_of_S ** 2]
return data
datasets = []
outputs = []
for k in range(n):
x = []
y = []
for subset, incr in self.increments_values[k].items():
x.append(makedata(subset))
y.append(incr)
datasets.append(x)
outputs.append(y)
# fit the regressions
models = []
for k in range(n):
model_k = LinearRegression()
model_k.fit(datasets[k], outputs[k])
models.append(model_k)
# define the approximation
def approx_increment(subset, k):
return models[k].predict([makedata(subset)])[0]
# compute the renormalization constant of the importance density for all datatsets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
renorms.append(renorm)
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval is below the value of
# sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
for k in range(n):
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as
# prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k)
)
if cumSum / renorms[k] > u:
S = np.array(subset)
break
if cumSum / renorms[k] > u:
break
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
contributions[t - 1][k] = (
increment * renorms[k] / np.abs(approx_increment(np.array(S), k))
)
v_max = np.max(np.var(contributions, axis=0))
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the Kriging adaptive importance sampling method
def AIS_Kriging(self, sv_accuracy=0.01, alpha=0.95, update=50):
"""Return the vector of approximated Shapley value corresponding to a list of partner
and a characteristic function using the importance sampling method and a Kriging model."""
start = timer()
n = len(self.scenario.partners_list)
# definition of the original density
def prob(subset):
lS = len(subset)
return factorial(n - 1 - lS) * factorial(lS) / factorial(n)
# definition of the approximation of the increment
# compute some increments to fuel the Kriging
S = np.arange(n)
self.not_twice_characteristic(S)
for k1 in range(n):
for k2 in range(n):
S = np.array([k1])
self.not_twice_characteristic(S)
S = np.delete(np.arange(n), [k1])
self.not_twice_characteristic(S)
if k1 != k2:
S = np.array([k1, k2])
self.not_twice_characteristic(S)
S = np.delete(np.arange(n), [k1, k2])
self.not_twice_characteristic(S)
# ## do the regressions
def make_coordinate(subset, k):
assert k not in subset
# compute the size of subset : ||subset||
coordinate = np.zeros(n)
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
for partner, i in zip(small_partners_list, subset):
coordinate[i] = len(partner.y_train)
coordinate = np.delete(coordinate, k)
return coordinate
def dist(x1, x2):
return np.sqrt(np.sum((x1 - x2) ** 2))
# make the covariance functions
phi = np.zeros(n)
cov = []
for k in range(n):
phi[k] = np.median(make_coordinate(np.delete(np.arange(n), k), k))
def covk(x1, x2, k=k): # bind k at definition time: a plain closure here would capture only the last value of k from the loop
return np.exp(-dist(x1, x2) ** 2 / phi[k] ** 2)
cov.append(covk)
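# The surrogate's covariance is a squared-exponential (RBF) kernel,
#   cov_k(x1, x2) = exp(-||x1 - x2||**2 / phi[k]**2),
# with the bandwidth phi[k] set by the median heuristic above (median coordinate of the
# "all partners except k" point). This is a reading of the code, not wording from the source.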
def make_models():
# make the datasets
datasets = []
outputs = []
for k in range(n):
x = []
y = []
for subset, incr in self.increments_values[k].items():
x.append(make_coordinate(subset, k))
y.append(incr)
datasets.append(x)
outputs.append(y)
# fit the kriging
models = []
for k in range(n):
model_k = KrigingModel(2, cov[k])
model_k.fit(datasets[k], outputs[k])
models.append(model_k)
all_models.append(models)
# define the approximation
def approx_increment(subset, k, j):
return all_models[j][k].predict(make_coordinate(subset, k))[0]
# sampling
t = 0
q = -norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
all_renorms = []
all_models = []
Subsets = [] # created like this to avoid pointer issue
# Check if the length of the confidence interval is below the value of sv_accuracy*characteristic_all_partners
while (
t < 100 or t < 4 * q ** 2 * v_max / (sv_accuracy) ** 2
):
if t == 0:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
subsets = []
if t % update == 0: # renew the importance density g
j = t // update
make_models()
# ## compute the renormalization constant of the new importance density for all datasets
renorms = []
for k in range(n):
list_k = np.delete(np.arange(n), k)
renorm = 0
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
renorm += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k, j)
)
renorms.append(renorm)
all_renorms.append(renorms)
# generate the new increments(subset)
for k in range(n):
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(n), k)
for length_combination in range(len(list_k) + 1):
for subset in combinations(
list_k, length_combination
): # could be avoided as prob(np.array(subset))*np.abs(approx_increment(np.array(subset),j))
# is constant in the combination
cumSum += prob(np.array(subset)) * np.abs(
approx_increment(np.array(subset), k, j)
)
if cumSum / all_renorms[j][k] > u:
S = np.array(subset)
subsets.append(S)
break
if cumSum / all_renorms[j][k] > u:
break
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
contributions[t][k] = (  # row t is the row appended for this iteration (t is only incremented at the end of the loop)
increment * all_renorms[j][k] / np.abs(approx_increment(S, k, j))
)
Subsets.append(subsets)
shap = np.mean(contributions, axis=0)
# compute the variances
v_max = np.max(np.var(contributions, axis=0))
t += 1
shap = np.mean(contributions, axis=0)
self.name = "<NAME>"
self.contributivity_scores = shap
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# # %% compute Shapley values with the stratified sampling method
def Stratified_MC(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley values using the stratified monte-carlo method."""
start = timer()
N = len(self.scenario.partners_list)
characteristic_all_partners = self.not_twice_characteristic(
np.arange(N)
) # Characteristic function on all partners
if N == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
# initialization
gamma = 0.2
beta = 0.0075
t = 0
sigma2 = np.zeros((N, N))
mu = np.zeros((N, N))
e = 0.0
v_max = 0
continuer = []
contributions = []
for k in range(N):
contributions.append(list())
continuer.append(list())
for k in range(N):
for strata in range(N):
contributions[k].append(list())
continuer[k].append(True)
# sampling
while np.any(continuer) or (1 - alpha) < v_max / (
sv_accuracy ** 2
): # Check if the length of the confidence interval is below the value of sv_accuracy
t += 1
e = (
1
+ 1 / (1 + np.exp(gamma / beta))
- 1 / (1 + np.exp(-(t - gamma * N) / (beta * N)))
) # e is used in the allocation to each stratum; here we take the formula advised in the literature
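# Allocation written out: partner k draws its next increment from stratum s with probability
#   p_s = (1 - e) / N + e * sigma2[k, s] / sum(sigma2[k]),
# a convex mix of uniform and variance-proportional allocation, weighted by the logistic schedule
#   e(t) = 1 + 1 / (1 + exp(gamma / beta)) - 1 / (1 + exp(-(t - gamma * N) / (beta * N))).
# Evaluating the expression as written gives e(0) = 1 and e(t) -> 1 / (1 + exp(gamma / beta)) ~ 0 for
# t >> gamma * N, with the transition centred near t ~ gamma * N and width of order beta * N
# (this is a direct reading of the formula above, not a claim taken from the source).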
for k in range(N):
# select the strata to add an increment
if np.sum(sigma2[k]) == 0:
p = np.repeat(1 / N, N) # allocate uniformly if np.sum(sigma2[k]) == 0
else:
p = (
np.repeat(1 / N, N) * (1 - e) + sigma2[k] / np.sum(sigma2[k]) * e
) # blend uniform allocation with allocation proportional to sigma2[k] / np.sum(sigma2[k]), weighted by e
strata = np.random.choice(np.arange(N), 1, p=p)[0]
# generate the increment
u = np.random.uniform(0, 1, 1)[0]
cumSum = 0
list_k = np.delete(np.arange(N), k)
for subset in combinations(list_k, strata):
cumSum += factorial(N - 1 - strata) * factorial(strata) / factorial(N - 1)
if cumSum > u:
S = np.array(subset, dtype=int)
break
SUk = np.append(S, k)
increment = self.not_twice_characteristic(
SUk
) - self.not_twice_characteristic(S)
contributions[k][strata].append(increment)
# compute the variance and mean of each stratum
sigma2[k, strata] = np.var(contributions[k][strata])
mu[k, strata] = np.mean(contributions[k][strata])
shap = np.mean(mu, axis=1)
var = np.zeros(N) # variance of the estimator
for k in range(N):
for strata in range(N):
n_k_strata = len(contributions[k][strata])
if n_k_strata == 0:
var[k] = np.Inf
else:
var[k] += sigma2[k, strata] ** 2 / n_k_strata
if n_k_strata > 20:
continuer[k][strata] = False
var[k] /= N ** 2
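# Reference formula, assuming the strata are estimated independently: since shap[k] is the mean of
# mu[k, :] over the N strata,
#   Var(shap[k]) ~ (1 / N**2) * sum_s sigma2[k, s] / n_k_strata(s).
# Note the accumulation above uses sigma2[k, strata] ** 2 (the squared variance); the expression
# here is given only for orientation and is not taken from the source.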
v_max = np.max(var)
self.name = "Stratified MC Shapley"
self.contributivity_scores = shap
self.scores_std = np.sqrt(var)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the without replacement stratified sampling method
def without_replacment_SMC(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley values using the stratified monte-carlo method."""
start = timer()
N = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(N))
if N == 1:
self.name = "WR_SMC Shapley"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
# initialisation
t = 0
sigma2 = np.zeros((N, N))
mu = np.zeros((N, N))
v_max = 0
continuer = []
increments_generated = []
increments_to_generate = []
for k in range(N):
increments_generated.append(list())
increments_to_generate.append(list())
continuer.append(list())
for k in range(N):
for strata in range(N):
increments_generated[k].append(dict())
increments_to_generate[k].append(list())
list_k = np.delete(np.arange(N), k)
for subset in combinations(list_k, strata):
increments_to_generate[k][strata].append(str(subset))
continuer[k].append(True)
# Sampling
while np.any(continuer) or (1 - alpha) < v_max / (
sv_accuracy ** 2
): # Check if the length of the confidence interval is below the value of sv_accuracy
t += 1
for k in range(N):
# select the strata to add an increment
if np.any(continuer[k]):
from __future__ import print_function, division
import os
import torch
import pandas as pd
import skimage
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import pdb, random
from torch.utils.data import Dataset, DataLoader
import random, os, cv2
unknown_code=128
class VideoData(Dataset):
def __init__(self,csv_file,data_config,transform=None):
self.frames = pd.read_csv(csv_file,sep=';')
self.transform = transform
self.resolution=data_config['reso']
def __len__(self):
return len(self.frames)
def __getitem__(self,idx):
img = io.imread(self.frames.iloc[idx, 0])
back = io.imread(self.frames.iloc[idx, 1])
seg = io.imread(self.frames.iloc[idx, 2])
fr1 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 3]), cv2.COLOR_BGR2GRAY)
fr2 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 4]), cv2.COLOR_BGR2GRAY)
fr3 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 5]), cv2.COLOR_BGR2GRAY)
fr4 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 6]), cv2.COLOR_BGR2GRAY)
back_rnd = io.imread(self.frames.iloc[idx, 7])
sz=self.resolution
if np.random.random_sample() > 0.5:
img = cv2.flip(img,1)
seg = cv2.flip(seg,1)
back = cv2.flip(back,1)
back_rnd = cv2.flip(back_rnd,1)
fr1=cv2.flip(fr1,1); fr2=cv2.flip(fr2,1); fr3=cv2.flip(fr3,1); fr4=cv2.flip(fr4,1)
#make frames together
multi_fr=np.zeros((img.shape[0],img.shape[1],4))
multi_fr[...,0]=fr1; multi_fr[...,1]=fr2; multi_fr[...,2]=fr3; multi_fr[...,3]=fr4;
#allow random cropping centered on the segmentation map
bbox=create_bbox(seg,seg.shape[0],seg.shape[1])
img=apply_crop(img,bbox,self.resolution)
seg=apply_crop(seg,bbox,self.resolution)
back=apply_crop(back,bbox,self.resolution)
back_rnd=apply_crop(back_rnd,bbox,self.resolution)
multi_fr=apply_crop(multi_fr,bbox,self.resolution)
#convert seg to guidance map
#segg=create_seg_guide(seg,self.resolution)
sample = {'image': to_tensor(img), 'seg': to_tensor(create_seg_guide(seg,self.resolution)), 'bg': to_tensor(back), 'multi_fr': to_tensor(multi_fr), 'seg-gt':to_tensor(seg), 'back-rnd': to_tensor(back_rnd)}
if self.transform:
sample = self.transform(sample)
return sample
class AdobeDataAffineHR(Dataset):
def __init__(self,csv_file,data_config,transform=None):
self.frames = pd.read_csv(csv_file,sep=';')
self.transform = transform
self.resolution=data_config['reso']
self.trimapK=data_config['trimapK']
self.noise=data_config['noise']
def __len__(self):
return len(self.frames)
def __getitem__(self,idx):
try:
#load
fg = io.imread(self.frames.iloc[idx, 0])
alpha = io.imread(self.frames.iloc[idx, 1])
image = io.imread(self.frames.iloc[idx, 2])
back = io.imread(self.frames.iloc[idx, 3])
fg = cv2.resize(fg, dsize=(800,800))
alpha = cv2.resize(alpha, dsize=(800,800))
back = cv2.resize(back, dsize=(800,800))
image = cv2.resize(image, dsize=(800,800))
sz=self.resolution
#random flip
if np.random.random_sample() > 0.5:
alpha = cv2.flip(alpha,1)
fg = cv2.flip(fg,1)
back = cv2.flip(back,1)
image = cv2.flip(image,1)
trimap=generate_trimap(alpha,self.trimapK[0],self.trimapK[1],False)
#random crop + scale
different_sizes = [(576,576),(608,608),(640,640),(672,672),(704,704),(736,736),(768,768),(800,800)]
crop_size = random.choice(different_sizes)
x, y = random_choice(trimap, crop_size)
fg = safe_crop(fg, x, y, crop_size,sz)
alpha = safe_crop(alpha, x, y, crop_size,sz)
image = safe_crop(image, x, y, crop_size,sz)
back = safe_crop(back, x, y, crop_size,sz)
trimap = safe_crop(trimap, x, y, crop_size,sz)
#Perturb Background: random noise addition or gamma change
if self.noise:
if np.random.random_sample() > 0.6:
sigma=np.random.randint(low=2, high=6)
mu=np.random.randint(low=0, high=14)-7
back_tr=add_noise(back,mu,sigma)
else:
back_tr=skimage.exposure.adjust_gamma(back,np.random.normal(1,0.12))
#Create motion cues: transform foreground and create 4 additional images
affine_fr=np.zeros((fg.shape[0],fg.shape[1],4))
for t in range(0,4):
T=np.random.normal(0,5,(2,1)); theta=np.random.normal(0,7);
R=np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))],[np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]])
sc=np.array([[1+np.random.normal(0,0.05), 0],[0,1]]); sh=np.array([[1, np.random.normal(0,0.05)*(np.random.random_sample() > 0.5)],[np.random.normal(0,0.05)*(np.random.random_sample() > 0.5), 1]]);
A=np.concatenate((sc*sh*R, T), axis=1);
fg_tr = cv2.warpAffine(fg.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REFLECT)
alpha_tr = cv2.warpAffine(alpha.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_NEAREST,borderMode=cv2.BORDER_REFLECT)
sigma=np.random.randint(low=2, high=6)
mu=np.random.randint(low=0, high=14)-7
back_tr0=add_noise(back,mu,sigma)
affine_fr[...,t]=cv2.cvtColor(composite(fg_tr,back_tr0,alpha_tr), cv2.COLOR_BGR2GRAY)
sample = {'image': to_tensor(image), 'fg': to_tensor(fg), 'alpha': to_tensor(alpha), 'bg': to_tensor(back), 'trimap': to_tensor(trimap), 'bg_tr': to_tensor(back_tr), 'seg': to_tensor(create_seg(alpha,trimap)), 'multi_fr': to_tensor(affine_fr)}
if self.transform:
sample = self.transform(sample)
return sample
except Exception as e:
print("Error loading: " + self.frames.iloc[idx, 0])
print(e)
#Functions
def create_seg_guide(rcnn,reso):
kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
rcnn=rcnn.astype(np.float32)/255; rcnn[rcnn>0.2]=1;
K=25
zero_id=np.nonzero(np.sum(rcnn,axis=1)==0)
del_id=zero_id[0][zero_id[0]>250]
if len(del_id)>0:
del_id=[del_id[0]-2,del_id[0]-1,*del_id]
rcnn=np.delete(rcnn,del_id,0)
rcnn = cv2.copyMakeBorder( rcnn, 0, K + len(del_id), 0, 0, cv2.BORDER_REPLICATE)
rcnn = cv2.erode(rcnn, kernel_er, iterations=np.random.randint(10,20))
rcnn = cv2.dilate(rcnn, kernel_dil, iterations=np.random.randint(3,7))
k_size_list=[(21,21),(31,31),(41,41)]
rcnn=cv2.GaussianBlur(rcnn.astype(np.float32),random.choice(k_size_list),0)
rcnn=(255*rcnn).astype(np.uint8)
rcnn=np.delete(rcnn, range(reso[0],reso[0]+K), 0)
return rcnn
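# Reading of create_seg_guide (the motivation in the last sentence is an assumption, not stated in
# the source): values above 0.2 are snapped to 1, fully-empty rows below row 250 are dropped and the
# bottom is re-padded so the height survives, the mask is then heavily eroded (10-20 iterations),
# slightly dilated (3-7 iterations), blurred with a randomly sized Gaussian kernel, rescaled to
# 0-255 and trimmed back to the target height. The result is a deliberately coarse, soft guidance
# map, presumably so the network trains on the kind of imperfect segmentation it will see at test time.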
def crop_holes(img,cx,cy,crop_size):
img[cy:cy+crop_size[0],cx:cx+crop_size[1]]=0
return img
def create_seg(alpha,trimap):
#old
num_holes=np.random.randint(low=0, high=3)
crop_size_list=[(15,15),(25,25),(35,35),(45,45)]
kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
seg = (alpha>0.5).astype(np.float32)
#print('Before %.4f max: %.4f' %(seg.sum(),seg.max()))
#old
seg = cv2.erode(seg, kernel_er, iterations=np.random.randint(low=10,high=20))
seg = cv2.dilate(seg, kernel_dil, iterations=np.random.randint(low=15,high=30))
#print('After %.4f max: %.4f' %(seg.sum(),seg.max()))
seg=seg.astype(np.float32)
seg=(255*seg).astype(np.uint8)
for i in range(num_holes):
crop_size=random.choice(crop_size_list)
cx,cy = random_choice(trimap,crop_size)
seg=crop_holes(seg,cx,cy,crop_size)
trimap=crop_holes(trimap,cx,cy,crop_size)
k_size_list=[(21,21),(31,31),(41,41)]
seg=cv2.GaussianBlur(seg.astype(np.float32),random.choice(k_size_list),0)
return seg.astype(np.uint8)
def apply_crop(img,bbox,reso):
img_crop=img[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3],...];
img_crop=cv2.resize(img_crop,reso)
return img_crop
def create_bbox(mask,R,C):
where = np.array(np.where(mask))
x1, y1 = np.amin(where, axis=1)
x2, y2 = np.amax(where, axis=1)
w=np.maximum(y2-y1,x2-x1);
bd=np.random.uniform(0.1,0.4)
x1=x1-np.round(bd*w)
#coding:utf-8
###########################################################
# SpCoTMHPi: Spatial Concept-based Path-Planning Program for SIGVerse
# Path-Planning Program by A star algorithm (ver. approximate inference)
# Path Selection: minimum cost (- log-likelihood) in a path trajectory
# <NAME> 2022/02/07
# Spacial Thanks: <NAME>, <NAME>
###########################################################
##Command:
#python3 spcotmhp_astar_metric.py trialname mapname iteration sample type_gauss
#python3 spcotmhp_astar_metric.py 3LDK_01 s3LDK_01 1 0 g
import sys
import time
import numpy as np
#import scipy as sp
from scipy.stats import multivariate_normal,multinomial
import matplotlib.pyplot as plt
import spconavi_read_data
import spconavi_save_data
#import spconavi_viterbi_path_calculate as spconavi_viterbi_path_calculate
from __init__ import *
from submodules import *
tools = spconavi_read_data.Tools()
read_data = spconavi_read_data.ReadingData()
save_data = spconavi_save_data.SavingData()
#path_calculate = spconavi_viterbi_path_calculate.PathPlanner()
#Definition of action (functions in spconavi_read_data)
action_functions = [tools.right, tools.left, tools.up, tools.down, tools.stay] #, migiue, hidariue, migisita, hidarisita]
cost_of_actions = np.log( np.ones(len(action_functions)) / float(len(action_functions)) ) #[ 1/5, 1/5, 1/5, 1/5, 1/5]) #, , 1, 1, 1, 1]
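# Each entry of cost_of_actions is log(1/5) ~ -1.609, the log-probability of a single move under a
# uniform motion model over the five actions; summing along a path of length T gives T * log(1/5),
# the log-likelihood of the action sequence. How this feeds the A* cost is not visible in this
# excerpt, but the header's "minimum cost (- log-likelihood)" suggests the planner minimises the
# accumulated negative log-likelihood.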
"""
#GaussMap make (no use) #Ito
def PostProb_ij(Index_temp,Mu,Sig,map_length,map_width, CostMapProb,it):
if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0):
X_temp = tools.Array_index_To_Map_coordinates(Index_temp) # need to check that the map and the vertical/horizontal coordinate axes are aligned
#print X_temp,Mu
sum_i_GaussMulti = [ multivariate_normal.pdf(X_temp, mean=Mu[it], cov=Sig[it])] ########## do not wrap this in np.array(): numba raises an error when it is an np.array
PostProb = np.sum(sum_i_GaussMulti) #sum_c_ProbCtsum_i
else:
PostProb = 0.0
return PostProb
"""
#GaussMap make (use) #Ito
def PostProbMap_Gauss(CostMapProb,Mu,Sig,map_length,map_width,it): #,IndexMap):
x,y = np.meshgrid(np.linspace(-10.0,9.92,map_width)
import argparse
import copy
import os
from collections import defaultdict
import numpy as np
import torch
from cogdl.data import Data, DataLoader
from cogdl.datasets import build_dataset
from cogdl.models import build_model
from sklearn.metrics import f1_score
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVC
from sklearn.utils import shuffle as skshuffle
from tqdm import tqdm
from . import BaseTask, register_task
from .graph_classification import node_degree_as_feature
@register_task("unsupervised_graph_classification")
class UnsupervisedGraphClassification(BaseTask):
r"""Unsupervised graph classification"""
@staticmethod
def add_args(parser: argparse.ArgumentParser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--num-shuffle", type=int, default=10)
parser.add_argument("--degree-feature", dest="degree_feature", action="store_true")
# fmt: on
def __init__(self, args, dataset=None, model=None):
super(UnsupervisedGraphClassification, self).__init__(args)
self.device = "cpu" if not torch.cuda.is_available() or args.cpu else args.device_id[0]
dataset = build_dataset(args) if dataset is None else dataset
if "gcc" in args.model:
self.label = dataset.graph_labels[:, 0]
self.data = dataset.graph_lists
else:
self.label = np.array([data.y for data in dataset])
self.data = [
Data(x=data.x, y=data.y, edge_index=data.edge_index, edge_attr=data.edge_attr, pos=data.pos).apply(
lambda x: x.to(self.device)
)
for data in dataset
]
args.num_features = dataset.num_features
args.num_classes = args.hidden_size
args.use_unsup = True
if args.degree_feature:
self.data = node_degree_as_feature(self.data)
args.num_features = self.data[0].num_features
self.num_graphs = len(self.data)
self.num_classes = dataset.num_classes
# self.label_matrix = np.zeros((self.num_graphs, self.num_classes))
# self.label_matrix[range(self.num_graphs), np.array([data.y for data in self.data], dtype=int)] = 1
self.model = build_model(args) if model is None else model
self.model = self.model.to(self.device)
self.model_name = args.model
self.hidden_size = args.hidden_size
self.num_shuffle = args.num_shuffle
self.save_dir = args.save_dir
self.epoch = args.epoch
self.use_nn = args.model in ("infograph",)
if self.use_nn:
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
self.data_loader = DataLoader(self.data, batch_size=args.batch_size, shuffle=True)
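# Evaluation protocol (inferred from the imports at the top of this file; the corresponding code is
# not visible in this excerpt): once the encoder produces one embedding per graph, the embeddings are
# typically scored with an SVC inside GridSearchCV over C, using shuffled splits / KFold and f1_score,
# averaged over self.num_shuffle shuffles.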
def train(self):
if self.use_nn:
best_model = None
best_loss = 10000
epoch_iter = tqdm(range(self.epoch))
for epoch in epoch_iter:
loss_n = []
for batch in self.data_loader:
batch = batch.to(self.device)
loss = self.model.graph_classification_loss(batch)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_n.append(loss.item())
loss_n = np.mean(loss_n)
epoch_iter.set_description(f"Epoch: {epoch:03d}, TrainLoss: {np.mean(loss_n)} ")
if loss_n < best_loss:
best_loss = loss_n
best_model = copy.deepcopy(self.model)
self.model = best_model
with torch.no_grad():
self.model.eval()
prediction = []
label = []
for batch in self.data_loader:
batch = batch.to(self.device)
predict = self.model(batch)
prediction.extend(predict.cpu().numpy())
label.extend(batch.y.cpu().numpy())
prediction = np.array(prediction)
import numpy as np
import pickle
import pandas as pd
from environments.interface import InterfaceEnvironment
# Necessary Data
with open("..//data//fertility.pkl", "rb") as f:
fertility_data = pickle.load(f)
with open("..//data//men_wage_path.pkl", "rb") as f:
men_wage_path_data = pickle.load(f)
with open("..//data//men_hours_empirical", "rb") as f:
men_hours_data = pickle.load(f)
men_salary_path = np.array(men_hours_data * men_wage_path_data * 46)
# calculating scales
Q_mean, Q_scale = (60 + 18) * 0.5, (60 - 18) * 0.5
K_mean, K_scale = (0 + 5) * 0.5, (5 - 0) * 0.5
G_mean, G_scale = (0 + 5) * 0.5, (5 - 0) * 0.5
Z_mean, Z_scale = (-200 + 200), (200 - (-200)) * 0.5
beta_K_mean, beta_K_scale = (-5 + 5) * 0.5, (5 - (-5)) * 0.5
beta_L_mean, beta_L_scale = (-5 + 5) * 0.5, (5 - (-5)) * 0.5
def scale_states(Q, G, K, Z, beta_K, beta_L):
Q = (Q - Q_mean) / Q_scale
G = (G - G_mean) / G_scale
K = (K - K_mean) / K_scale
Z = (Z - Z_mean) / Z_scale
beta_K = (beta_K - beta_K_mean) / beta_K_scale
beta_L = (beta_L - beta_L_mean) / beta_L_scale
return np.array([Q, G, K, Z, beta_K, beta_L])
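# scale_states maps each raw state from its [low, high] range onto [-1, 1] via (x - mean) / scale,
# with mean = (low + high) / 2 and scale = (high - low) / 2. Worked example for Q in [18, 60]:
#   Q_mean = 39, Q_scale = 21, so Q = 18 -> -1.0 and Q = 60 -> +1.0.
# (The Z line above omits the * 0.5 on the mean, but (-200 + 200) = 0 either way, so the value is unchanged.)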
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`blob`
==================
.. module:: blob
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2016-01-14, 11:45
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from operator import itemgetter, attrgetter
import numpy as np
from scipy import ndimage as ndi
from skimage.filters import gaussian_filter, threshold_adaptive
from skimage.measure import label
from skimage.measure import regionprops
from skimage.morphology import binary_dilation, binary_erosion
from skimage.transform import resize
from sudokuextract.exceptions import SudokuExtractError
from sudokuextract.imgproc.binary import to_binary_otsu, add_border
from sudokuextract.imgproc.geometry import get_contours
def iter_blob_contours(image, n=5):
original_shape = image.shape[::-1]
if max(original_shape) < 2000:
size = (500, 500)
y_scale = original_shape[0] / 500
x_scale = original_shape[1] / 500
else:
size = (1000, 1000)
y_scale = original_shape[0] / 1000
x_scale = original_shape[1] / 1000
img = resize(image, size)
bimg = gaussian_filter(img, sigma=1.0)
bimg = threshold_adaptive(bimg, 20, offset=2/255)
bimg = (~binary_erosion(bimg))
label_image = label(bimg, background=False)
label_image += 1
regions = regionprops(label_image)
regions.sort(key=attrgetter('area'), reverse=True)
iter_n = 0
for region in regions:
iter_n += 1
if iter_n > n:
break
try:
coords = get_contours(add_border(label_image == region.label,
size=label_image.shape,
border_size=1,
background_value=False))[0]
if np.linalg.norm(coords[0, :] - coords[-1, :]) > 1e-10:
raise SudokuExtractError("Not a closed contour.")
else:
coords = np.fliplr(coords[:-1, :])
top_left = sorted(coords, key=lambda x: np.linalg.norm(np.array(x)))[0]
top_right = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [img.shape[1], 0]))[0]
bottom_left = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [0, img.shape[0]]))[0]
bottom_right = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [img.shape[1], img.shape[0]]))[0]
tl_i = np.argmax((coords == top_left).sum(axis=1))
tr_i = np.argmax((coords == top_right).sum(axis=1))
bl_i = np.argmax((coords == bottom_left).sum(axis=1))
br_i = np.argmax((coords == bottom_right).sum(axis=1))
coords[:, 0] *= y_scale
coords[:, 1] *= x_scale
if tl_i > bl_i:
left_edge = coords[bl_i:tl_i + 1, :]
else:
coords_end_of_array = coords[bl_i:, :]
coords_start_of_array = coords[:tl_i + 1]
left_edge = np.concatenate([coords_end_of_array, coords_start_of_array], axis=0)
if tr_i > tl_i:
top_edge = coords[tl_i:tr_i + 1, :]
else:
coords_end_of_array = coords[tl_i:, :]
coords_start_of_array = coords[:tr_i + 1]
top_edge = np.concatenate([coords_end_of_array, coords_start_of_array], axis=0)
if br_i > tr_i:
right_edge = coords[tr_i:br_i + 1, :]
else:
coords_end_of_array = coords[tr_i:, :]
coords_start_of_array = coords[:br_i + 1]
right_edge = np.concatenate([coords_end_of_array, coords_start_of_array], axis=0)
if bl_i > br_i:
bottom_edge = coords[br_i:bl_i + 1, :]
else:
coords_end_of_array = coords[br_i:, :]
coords_start_of_array = coords[:bl_i + 1]
bottom_edge = np.concatenate([coords_end_of_array, coords_start_of_array], axis=0)
yield left_edge, top_edge, right_edge, bottom_edge
except Exception:
pass
raise SudokuExtractError("No suitable blob could be found.")
def iter_blob_extremes(image, n=5):
original_shape = image.shape[::-1]
if max(original_shape) < 2000:
size = (500, 500)
y_scale = original_shape[0] / 500
x_scale = original_shape[1] / 500
else:
size = (1000, 1000)
y_scale = original_shape[0] / 1000
x_scale = original_shape[1] / 1000
img = resize(image, size)
bimg = gaussian_filter(img, sigma=1.0)
bimg = threshold_adaptive(bimg, 20, offset=2/255)
bimg = ~bimg  # invert the binary image (bitwise NOT; unary minus on boolean arrays is not supported by modern NumPy)
bimg = ndi.binary_fill_holes(bimg)
label_image = label(bimg, background=False)
label_image += 1
regions = regionprops(label_image)
regions.sort(key=attrgetter('area'), reverse=True)
iter_n = 0
for region in regions:
try:
iter_n += 1
if iter_n > n:
break
# Skip small images
if region.area < int(np.prod(size) * 0.05):
continue
coords = get_contours(add_border(label_image == region.label,
size=label_image.shape,
border_size=1,
background_value=False))[0]
coords = np.fliplr(coords)
top_left = sorted(coords, key=lambda x: np.linalg.norm(np.array(x)))[0]
top_right = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [img.shape[1], 0]))[0]
bottom_left = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [0, img.shape[0]]))[0]
bottom_right = sorted(coords, key=lambda x: np.linalg.norm(np.array(x) - [img.shape[1], img.shape[0]]))[0]
scaled_extremes = [(int(x[0] * y_scale), int(x[1]*x_scale)) for x in (top_left, top_right, bottom_left, bottom_right)]
yield scaled_extremes
except Exception:
pass
raise SudokuExtractError("No suitable blob could be found.")
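# The "extremes" yielded above are the contour pixels closest, in Euclidean distance, to each image
# corner: top-left = argmin ||p||, top-right = argmin ||p - (W, 0)||, bottom-left = argmin ||p - (0, H)||,
# bottom-right = argmin ||p - (W, H)||, scaled back to the original resolution. They serve as corner
# estimates of the Sudoku grid (that last use is an interpretation, not a statement from the source).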
def blobify(images):
output_data = []
mask = []
for d in images:
blob = get_centered_blob(d)
if blob is not None:
output_data.append(blob)
mask.append(True)
else:
mask.append(False)
return output_data, np.array(mask)
def _get_most_centered_blob(image):
# Label the binary image into connected black and white regions.
label_image = label(image)
blobs = []
# If only one region, skip this image.
if len(np.unique(label_image)) == 1:
return None
# Create intensity image.
i_img = 255 - image
i_img[i_img == 255] = 1
for region in regionprops(label_image, i_img):
# Test 1: If the region is too small to be interesting: skip it.
if region.area < (np.prod(image.shape) * 0.010):
continue
# Test 2: If the region a white one: skip it.
if region.max_intensity < 0.9:
continue
# Extract bounding box.
a_min, b_min, a_max, b_max = region.bbox
a_dist = a_max - a_min
b_dist = b_max - b_min
# Removed test 2.5 on disproportionate x to y dimensions.
# if (np.min([a_dist, b_dist]) / np.max([a_dist, b_dist])) < 0.1:
# continue
# Test 3: If the bounding box is larger than half of the image, check
# whether or not the region's relative filled area is smaller than 0.5.
# If so: skip it!
if (a_dist * b_dist) > (np.prod(image.shape) * 0.5):
if (region.solidity / (a_dist * b_dist)) < 0.5:
continue
# Make bounding box square, centered with respect to the largest side.
if a_dist < b_dist:
a_mid = (a_min + a_max) // 2
a_max = a_mid + int(np.ceil(b_dist / 2))
a_min = a_mid - int(np.ceil(b_dist / 2))
blob_image = np.zeros((a_max - a_min, b_max - b_min), 'uint8')
start_point = blob_image.shape[0] // 2 - region.image.shape[0] // 2
blob_image[start_point:start_point + region.image.shape[0], :] = region.image.copy()
if any(np.array((a_min, b_min - 1, a_max, b_max + 1)) < 0):
continue
else:
try:
b_mid = (b_min + b_max) // 2
b_max = b_mid + int(np.ceil(a_dist / 2))
b_min = b_mid - int(np.ceil(a_dist / 2))
blob_image = np.zeros((a_max - a_min, b_max - b_min), 'uint8')
start_point = blob_image.shape[1] // 2 - region.image.shape[1] // 2
blob_image[:, start_point:start_point + region.image.shape[1]] = region.image.copy() * 255
if any(np.array((a_min - 1, b_min, a_max + 1, b_max)) < 0):
continue
except Exception as e:
pass
# Invert image to regain the original black/white status.
blob_image = 255 - blob_image
# Calculate the distance of the blob intensity centroid to the image center.
wc_dist = np.linalg.norm(np.array(image.shape)
#!/opt/anaconda3/bin/python3
""" Class definitions and a main routine to simulate traffic """
import os
import subprocess
import glob
import numpy as np
import matplotlib.pyplot as plt
class Car:
""" A class for modeling cars in traffic """
def __init__(self, aggression=1.0, ahead=None):
""" Constructor
Args:
aggression: The driver's aggressiveness
ahead: The car ahead
"""
self.aggression = aggression
self.ahead = ahead
self.position = 0
self.velocity = 0
def drive(self, time, time_step=0.001):
""" Drive the car
Args:
time: The current simulation time, seconds (not used in the update)
time_step: The time step, seconds
"""
displacement = (self.ahead.position - self.position)
velocity_diff = (self.ahead.velocity - self.velocity)
# Model driving as a linear spring and a linear damper
force = self.aggression * (displacement + 0.25 * velocity_diff)
self.position += time_step * self.velocity
self.velocity += time_step * force
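# The update above written out (unit mass assumed):
#   F = aggression * ((x_ahead - x) + 0.25 * (v_ahead - v))   # linear spring plus linear damper
#   x <- x + v * dt
#   v <- v + F * dt
# i.e. one explicit (forward) Euler step per call, with the position advanced using the pre-update velocity.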
class Traffic:
""" A class to simulate traffic """
def __init__(self, length=1, average_speed=1):
""" Constructor
Args:
length: The length of the traffic
average_speed: The average speed of the traffic
"""
self.length = length
self.average_speed = average_speed
self.time = 0
self.cars = []
self.positions = -1 * np.arange(self.length)
"""Unit tests for pair module."""
import json
import tempfile
import unittest
import numpy
import relentless
class test_PairParameters(unittest.TestCase):
"""Unit tests for relentless.pair.PairParameters"""
def test_init(self):
"""Test creation from data"""
types = ('A','B')
pairs = (('A','B'), ('B','B'), ('A','A'))
params = ('energy', 'mass')
#test construction with tuple input
m = relentless.potential.PairParameters(types=('A','B'), params=('energy','mass'))
self.assertEqual(m.types, types)
self.assertEqual(m.params, params)
self.assertCountEqual(m.pairs, pairs)
#test construction with list input
m = relentless.potential.PairParameters(types=['A','B'], params=('energy','mass'))
self.assertEqual(m.types, types)
self.assertEqual(m.params, params)
self.assertCountEqual(m.pairs, pairs)
#test construction with mixed tuple/list input
m = relentless.potential.PairParameters(types=('A','B'), params=['energy','mass'])
self.assertEqual(m.types, types)
self.assertEqual(m.params, params)
self.assertCountEqual(m.pairs, pairs)
#test construction with int type parameters
with self.assertRaises(TypeError):
m = relentless.potential.PairParameters(types=('A','B'), params=(1,2))
#test construction with mixed type parameters
with self.assertRaises(TypeError):
m = relentless.potential.PairParameters(types=('A','B'), params=('1',2))
def test_param_types(self):
"""Test various get and set methods on pair parameter types"""
m = relentless.potential.PairParameters(types=('A','B'), params=('energy', 'mass'))
self.assertEqual(m.shared['energy'], None)
self.assertEqual(m.shared['mass'], None)
self.assertEqual(m['A','A']['energy'], None)
self.assertEqual(m['A','A']['mass'], None)
self.assertEqual(m['A','B']['energy'], None)
self.assertEqual(m['A','B']['mass'], None)
self.assertEqual(m['B','B']['energy'], None)
self.assertEqual(m['B','B']['mass'], None)
self.assertEqual(m['A']['energy'], None)
self.assertEqual(m['A']['mass'], None)
self.assertEqual(m['B']['energy'], None)
self.assertEqual(m['B']['mass'], None)
#test setting shared params
m.shared.update(energy=1.0, mass=2.0)
self.assertEqual(m.shared['energy'], 1.0)
self.assertEqual(m.shared['mass'], 2.0)
self.assertEqual(m['A','A']['energy'], None)
self.assertEqual(m['A','A']['mass'], None)
self.assertEqual(m['A','B']['energy'], None)
self.assertEqual(m['A','B']['mass'], None)
self.assertEqual(m['B','B']['energy'], None)
self.assertEqual(m['B','B']['mass'], None)
self.assertEqual(m['A']['energy'], None)
self.assertEqual(m['A']['mass'], None)
self.assertEqual(m['B']['energy'], None)
self.assertEqual(m['B']['mass'], None)
#test setting per-pair params
m['A','A'].update(energy=1.5, mass=2.5)
m['A','B'].update(energy=2.0, mass=3.0)
m['B','B'].update(energy=0.5, mass=0.7)
self.assertEqual(m.shared['energy'], 1.0)
self.assertEqual(m.shared['mass'], 2.0)
self.assertEqual(m['A','A']['energy'], 1.5)
self.assertEqual(m['A','A']['mass'], 2.5)
self.assertEqual(m['A','B']['energy'], 2.0)
self.assertEqual(m['A','B']['mass'], 3.0)
self.assertEqual(m['B','B']['energy'], 0.5)
self.assertEqual(m['B','B']['mass'], 0.7)
self.assertEqual(m['A']['energy'], None)
self.assertEqual(m['A']['mass'], None)
self.assertEqual(m['B']['energy'], None)
self.assertEqual(m['B']['mass'], None)
#test setting per-type params
m['A'].update(energy=0.1, mass=0.2)
m['B'].update(energy=0.2, mass=0.1)
self.assertEqual(m.shared['energy'], 1.0)
self.assertEqual(m.shared['mass'], 2.0)
self.assertEqual(m['A','A']['energy'], 1.5)
self.assertEqual(m['A','A']['mass'], 2.5)
self.assertEqual(m['A','B']['energy'], 2.0)
self.assertEqual(m['A','B']['mass'], 3.0)
self.assertEqual(m['B','B']['energy'], 0.5)
self.assertEqual(m['B','B']['mass'], 0.7)
self.assertEqual(m['A']['energy'], 0.1)
self.assertEqual(m['A']['mass'], 0.2)
self.assertEqual(m['B']['energy'], 0.2)
self.assertEqual(m['B']['mass'], 0.1)
class LinPot(relentless.potential.PairPotential):
"""Linear potential function used to test relentless.potential.PairPotential"""
def __init__(self, types, params):
super().__init__(types, params)
def _energy(self, r, m, **params):
r,u,s = self._zeros(r)
u[:] = m*r
if s:
u = u.item()
return u
def _force(self, r, m, **params):
r,f,s = self._zeros(r)
f[:] = -m
if s:
f = f.item()
return f
def _derivative(self, param, r, **params):
r,d,s = self._zeros(r)
if param == 'm':
d[:] = r
if s:
d = d.item()
return d
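# For reference: LinPot implements u(r) = m * r, so f(r) = -du/dr = -m and du/dm = r; the expected
# values in the tests below are computed from these closed forms.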
class TwoVarPot(relentless.potential.PairPotential):
"""Mock potential function used to test relentless.potential.PairPotential.derivative"""
def __init__(self, types, params):
super().__init__(types, params)
def _energy(self, r, x, y, **params):
pass
def _force(self, r, x, y, **params):
pass
def _derivative(self, param, r, **params):
#not real derivative, just used to test functionality
r,d,s = self._zeros(r)
if param == 'x':
d[:] = 2*r
elif param == 'y':
d[:] = 3*r
if s:
d = d.item()
return d
class test_PairPotential(unittest.TestCase):
"""Unit tests for relentless.potential.PairPotential"""
def test_init(self):
"""Test creation from data"""
#test creation with only m
p = LinPot(types=('1',), params=('m',))
p.coeff['1','1']['m'] = 3.5
coeff = relentless.potential.PairParameters(types=('1',), params=('m','rmin','rmax','shift'))
coeff['1','1']['m'] = 3.5
coeff['1','1']['rmin'] = False
coeff['1','1']['rmax'] = False
coeff['1','1']['shift'] = False
self.assertCountEqual(p.coeff.types, coeff.types)
self.assertCountEqual(p.coeff.params, coeff.params)
self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))
#test creation with m and rmin
p = LinPot(types=('1',), params=('m','rmin'))
p.coeff['1','1']['m'] = 3.5
p.coeff['1','1']['rmin'] = 0.0
coeff = relentless.potential.PairParameters(types=('1',), params=('m','rmin','rmax','shift'))
coeff['1','1']['m'] = 3.5
coeff['1','1']['rmin'] = 0.0
coeff['1','1']['rmax'] = False
coeff['1','1']['shift'] = False
self.assertCountEqual(p.coeff.types, coeff.types)
self.assertCountEqual(p.coeff.params, coeff.params)
self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))
#test creation with m and rmax
p = LinPot(types=('1',), params=('m','rmax'))
p.coeff['1','1']['m'] = 3.5
p.coeff['1','1']['rmax'] = 1.0
coeff = relentless.potential.PairParameters(types=('1',), params=('m','rmin','rmax','shift'))
coeff['1','1']['m'] = 3.5
coeff['1','1']['rmin'] = False
coeff['1','1']['rmax'] = 1.0
coeff['1','1']['shift'] = False
self.assertCountEqual(p.coeff.types, coeff.types)
self.assertCountEqual(p.coeff.params, coeff.params)
self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))
#test creation with m and shift
p = LinPot(types=('1',), params=('m','shift'))
p.coeff['1','1']['m'] = 3.5
p.coeff['1','1']['shift'] = True
coeff = relentless.potential.PairParameters(types=('1',), params=('m','rmin','rmax','shift'))
coeff['1','1']['m'] = 3.5
coeff['1','1']['rmin'] = False
coeff['1','1']['rmax'] = False
coeff['1','1']['shift'] = True
self.assertCountEqual(p.coeff.types, coeff.types)
self.assertCountEqual(p.coeff.params, coeff.params)
self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))
#test creation with all params
p = LinPot(types=('1',), params=('m','rmin','rmax','shift'))
p.coeff['1','1']['m'] = 3.5
p.coeff['1','1']['rmin'] = 0.0
p.coeff['1','1']['rmax'] = 1.0
p.coeff['1','1']['shift'] = True
coeff = relentless.potential.PairParameters(types=('1',), params=('m','rmin','rmax','shift'))
coeff['1','1']['m'] = 3.5
coeff['1','1']['rmin'] = 0.0
coeff['1','1']['rmax'] = 1.0
coeff['1','1']['shift'] = True
self.assertCountEqual(p.coeff.types, coeff.types)
self.assertCountEqual(p.coeff.params, coeff.params)
self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))
def test_energy(self):
"""Test energy method"""
p = LinPot(types=('1',), params=('m',))
p.coeff['1','1']['m'] = 2.0
#test with no cutoffs
u = p.energy(pair=('1','1'), r=0.5)
self.assertAlmostEqual(u, 1.0)
u = p.energy(pair=('1','1'), r=[0.25,0.75])
numpy.testing.assert_allclose(u, [0.5,1.5])
#test with rmin set
p.coeff['1','1']['rmin'] = 0.5
u = p.energy(pair=('1','1'), r=0.6)
self.assertAlmostEqual(u, 1.2)
u = p.energy(pair=('1','1'), r=[0.25,0.75])
numpy.testing.assert_allclose(u, [1.0,1.5])
#test with rmax set
p.coeff['1','1'].update(rmin=False, rmax=1.5)
u = p.energy(pair=('1','1'), r=1.0)
self.assertAlmostEqual(u, 2.0)
u = p.energy(pair=('1','1'), r=[0.25,1.75])
numpy.testing.assert_allclose(u, [0.5,3.0])
#test with rmin and rmax set
p.coeff['1','1']['rmin'] = 0.5
u = p.energy(pair=('1','1'), r=0.75)
self.assertAlmostEqual(u, 1.5)
u = p.energy(pair=('1','1'), r=[0.25,0.5,1.5,1.75])
numpy.testing.assert_allclose(u, [1.0,1.0,3.0,3.0])
#test with shift set
p.coeff['1','1'].update(shift=True)
u = p.energy(pair=('1','1'), r=0.5)
self.assertAlmostEqual(u, -2.0)
u = p.energy(pair=('1','1'), r=[0.25,0.75,1.0,1.5])
numpy.testing.assert_allclose(u, [-2.0,-1.5,-1.0,0.0])
#test with shift set without rmax
p.coeff['1','1'].update(rmax=False)
with self.assertRaises(ValueError):
u = p.energy(pair=('1','1'), r=0.5)
def test_force(self):
"""Test force method"""
p = LinPot(types=('1',), params=('m',))
p.coeff['1','1']['m'] = 2.0
#test with no cutoffs
f = p.force(pair=('1','1'), r=0.5)
self.assertAlmostEqual(f, -2.0)
f = p.force(pair=('1','1'), r=[0.25,0.75])
numpy.testing.assert_allclose(f, [-2.0,-2.0])
#test with rmin set
p.coeff['1','1']['rmin'] = 0.5
f = p.force(pair=('1','1'), r=0.6)
self.assertAlmostEqual(f, -2.0)
f = p.force(pair=('1','1'), r=[0.25,0.75])
numpy.testing.assert_allclose(f, [0.0,-2.0])
#test with rmax set
p.coeff['1','1'].update(rmin=False, rmax=1.5)
f = p.force(pair=('1','1'), r=1.0)
self.assertAlmostEqual(f, -2.0)
f = p.force(pair=('1','1'), r=[0.25,1.75])
numpy.testing.assert_allclose(f, [-2.0,0.0])
#test with rmin and rmax set
p.coeff['1','1']['rmin'] = 0.5
f = p.force(pair=('1','1'), r=0.75)
self.assertAlmostEqual(f, -2.0)
f = p.force(pair=('1','1'), r=[0.25,0.5,1.5,1.75])
numpy.testing.assert_allclose(f, [0.0,-2.0,-2.0,0.0])
#test with shift set
p.coeff['1','1'].update(shift=True)
f = p.force(pair=('1','1'), r=0.5)
self.assertAlmostEqual(f, -2.0)
f = p.force(pair=('1','1'), r=[1.0,1.5])
numpy.testing.assert_allclose(f, [-2.0,-2.0])
def test_derivative_values(self):
"""Test derivative method with different param values"""
p = LinPot(types=('1',), params=('m',))
x = relentless.variable.DesignVariable(value=2.0)
p.coeff['1','1']['m'] = x
#test with no cutoffs
d = p.derivative(pair=('1','1'), var=x, r=0.5)
self.assertAlmostEqual(d, 0.5)
d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.75])
numpy.testing.assert_allclose(d, [0.25,0.75])
#test with rmin set
rmin = relentless.variable.DesignVariable(value=0.5)
p.coeff['1','1']['rmin'] = rmin
d = p.derivative(pair=('1','1'), var=x, r=0.6)
self.assertAlmostEqual(d, 0.6)
d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.75])
numpy.testing.assert_allclose(d, [0.5,0.75])
#test with rmax set
rmax = relentless.variable.DesignVariable(value=1.5)
p.coeff['1','1'].update(rmin=False, rmax=rmax)
d = p.derivative(pair=('1','1'), var=x, r=1.0)
self.assertAlmostEqual(d, 1.0)
d = p.derivative(pair=('1','1'), var=x, r=[0.25,1.75])
numpy.testing.assert_allclose(d, [0.25,1.5])
#test with rmin and rmax set
p.coeff['1','1']['rmin'] = rmin
d = p.derivative(pair=('1','1'), var=x, r=0.75)
self.assertAlmostEqual(d, 0.75)
d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.5,1.5,1.75])
numpy.testing.assert_allclose(d, [0.5,0.5,1.5,1.5])
#test w.r.t. rmin and rmax
d = p.derivative(pair=('1','1'), var=rmin, r=[0.25,1.0,2.0])
numpy.testing.assert_allclose(d, [2.0,0.0,0.0])
d = p.derivative(pair=('1','1'), var=rmax, r=[0.25,1.0,2.0])
numpy.testing.assert_allclose(d, [0.0,0.0,2.0])
#test parameter derivative with shift set
p.coeff['1','1'].update(shift=True)
d = p.derivative(pair=('1','1'), var=x, r=0.5)
self.assertAlmostEqual(d, -1.0)
d = p.derivative(pair=('1','1'), var=x, r=[0.25,1.0,1.5,1.75])
numpy.testing.assert_allclose(d, [-1.0,-0.5,0.0,0.0])
#test w.r.t. rmin and rmax, shift set
d = p.derivative(pair=('1','1'), var=rmin, r=[0.25,1.0,2.0])
numpy.testing.assert_allclose(d, [2.0,0.0,0.0])
d = p.derivative(pair=('1','1'), var=rmax, r=[0.25,1.0,2.0])
numpy.testing.assert_allclose(d, [-2.0,-2.0,0.0])
def test_derivative_types(self):
"""Test derivative method with different param types."""
q = LinPot(types=('1',), params=('m',))
x = relentless.variable.DesignVariable(value=4.0)
y = relentless.variable.DesignVariable(value=64.0)
z = relentless.variable.GeometricMean(x, y)
q.coeff['1','1']['m'] = z
#test with respect to dependent variable parameter
d = q.derivative(pair=('1','1'), var=z, r=2.0)
self.assertAlmostEqual(d, 2.0)
#test with respect to independent variable on which parameter is dependent
d = q.derivative(pair=('1','1'), var=x, r=1.5)
self.assertAlmostEqual(d, 3.0)
d = q.derivative(pair=('1','1'), var=y, r=4.0)
self.assertAlmostEqual(d, 0.5)
#test invalid derivative w.r.t. scalar
a = 2.5
q.coeff['1','1']['m'] = a
with self.assertRaises(TypeError):
d = q.derivative(pair=('1','1'), var=a, r=2.0)
#test with respect to independent variable which is related to a SameAs variable
r = TwoVarPot(types=('1',), params=('x','y'))
r.coeff['1','1']['x'] = x
r.coeff['1','1']['y'] = relentless.variable.SameAs(x)
d = r.derivative(pair=('1','1'), var=x, r=4.0)
self.assertAlmostEqual(d, 20.0)
r.coeff['1','1']['y'] = x
r.coeff['1','1']['x'] = relentless.variable.SameAs(x)
d = r.derivative(pair=('1','1'), var=x, r=4.0)
self.assertAlmostEqual(d, 20.0)
def test_iteration(self):
"""Test iteration on PairPotential object"""
p = LinPot(types=('1','2'), params=('m',))
for pair in p.coeff:
p.coeff[pair]['m'] = 2.0
p.coeff[pair]['rmin'] = 0.0
p.coeff[pair]['rmax'] = 1.0
self.assertDictEqual(p.coeff['1','1'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})
self.assertDictEqual(p.coeff['1','2'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})
self.assertDictEqual(p.coeff['2','2'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})
def test_save(self):
"""Test saving to file"""
temp = tempfile.NamedTemporaryFile()
p = LinPot(types=('1',), params=('m','rmin','rmax'))
p.coeff['1','1']['m'] = 2.0
p.coeff['1','1']['rmin'] = 0.0
p.coeff['1','1']['rmax'] = 1.0
p.coeff['1','1']['shift'] = True
p.save(temp.name)
with open(temp.name, 'r') as f:
x = json.load(f)
self.assertEqual(p.coeff['1','1']['m'], x["('1', '1')"]['m'])
self.assertEqual(p.coeff['1','1']['rmin'], x["('1', '1')"]['rmin'])
self.assertEqual(p.coeff['1','1']['rmax'], x["('1', '1')"]['rmax'])
self.assertEqual(p.coeff['1','1']['shift'], x["('1', '1')"]['shift'])
temp.close()
class test_LennardJones(unittest.TestCase):
"""Unit tests for relentless.potential.LennardJones"""
def test_init(self):
"""Test creation from data"""
lj = relentless.potential.LennardJones(types=('1',))
coeff = relentless.potential.PairParameters(types=('1',),
params=('epsilon','sigma','rmin','rmax','shift'))
for pair in coeff.pairs:
coeff[pair]['rmin'] = False
coeff[pair]['rmax'] = False
coeff[pair]['shift'] = False
self.assertCountEqual(lj.coeff.types, coeff.types)
self.assertCountEqual(lj.coeff.params, coeff.params)
def test_energy(self):
"""Test _energy method"""
lj = relentless.potential.LennardJones(types=('1',))
#test scalar r
r_input = 0.5
u_actual = 0
u = lj._energy(r=r_input, epsilon=1.0, sigma=0.5)
self.assertAlmostEqual(u, u_actual)
#test array r
r_input = numpy.array([0,1,1.5])
u_actual = numpy.array([numpy.inf,-0.061523438,-0.0054794417])
u = lj._energy(r=r_input, epsilon=1.0, sigma=0.5)
numpy.testing.assert_allclose(u, u_actual)
#test negative sigma
with self.assertRaises(ValueError):
u = lj._energy(r=r_input, epsilon=1.0, sigma=-1.0)
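# Sanity check of the expected values above, using the textbook Lennard-Jones form
# u(r) = 4 * epsilon * ((sigma / r)**12 - (sigma / r)**6) (assumed to be what relentless implements):
#   r = 0.5, sigma = 0.5: 4 * (1 - 1) = 0
#   r = 1.0, sigma = 0.5: 4 * (0.5**12 - 0.5**6) ~ -0.061523438
#   r = 1.5, sigma = 0.5: 4 * ((1/3)**12 - (1/3)**6) ~ -0.0054794417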
def test_force(self):
"""Test _force method"""
lj = relentless.potential.LennardJones(types=('1',))
#test scalar r
r_input = 0.5
f_actual = 48
f = lj._force(r=r_input, epsilon=1.0, sigma=0.5)
self.assertAlmostEqual(f, f_actual)
#test array r
r_input = numpy.array([0,1,1.5])
f_actual = numpy.array([numpy.inf,-0.36328125,-0.02188766])
f = lj._force(r=r_input, epsilon=1.0, sigma=0.5)
numpy.testing.assert_allclose(f, f_actual)
#test negative sigma
with self.assertRaises(ValueError):
u = lj._force(r=r_input, epsilon=1.0, sigma=-1.0)
def test_derivative(self):
"""Test _derivative method"""
lj = relentless.potential.LennardJones(types=('1',))
#w.r.t. epsilon
#test scalar r
r_input = 0.5
d_actual = 0
d = lj._derivative(param='epsilon', r=r_input, epsilon=1.0, sigma=0.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([0,1,1.5])
d_actual = numpy.array([numpy.inf,-0.061523438,-0.0054794417])
d = lj._derivative(param='epsilon', r=r_input, epsilon=1.0, sigma=0.5)
numpy.testing.assert_allclose(d, d_actual)
#w.r.t. sigma
#test scalar r
r_input = 0.5
d_actual = 48
d = lj._derivative(param='sigma', r=r_input, epsilon=1.0, sigma=0.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([0,1,1.5])
d_actual = numpy.array([numpy.inf,-0.7265625,-0.06566298])
d = lj._derivative(param='sigma', r=r_input, epsilon=1.0, sigma=0.5)
numpy.testing.assert_allclose(d, d_actual)
#test negative sigma
with self.assertRaises(ValueError):
u = lj._derivative(param='sigma', r=r_input, epsilon=1.0, sigma=-1.0)
#test invalid param
with self.assertRaises(ValueError):
u = lj._derivative(param='simga', r=r_input, epsilon=1.0, sigma=1.0)
class test_PairSpline(unittest.TestCase):
"""Unit tests for relentless.potential.PairSpline"""
def test_init(self):
"""Test creation from data"""
#test diff mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3)
self.assertEqual(s.num_knots, 3)
self.assertEqual(s.mode, 'diff')
coeff = relentless.potential.PairParameters(types=('1',),
params=('r-0','r-1','r-2','knot-0','knot-1','knot-2','rmin','rmax','shift'))
self.assertCountEqual(s.coeff.types, coeff.types)
self.assertCountEqual(s.coeff.params, coeff.params)
#test value mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
self.assertEqual(s.num_knots, 3)
self.assertEqual(s.mode, 'value')
coeff = relentless.potential.PairParameters(types=('1',),
params=('r-0','r-1','r-2','knot-0','knot-1','knot-2','rmin','rmax','shift'))
self.assertCountEqual(s.coeff.types, coeff.types)
self.assertCountEqual(s.coeff.params, coeff.params)
#test invalid number of knots
with self.assertRaises(ValueError):
s = relentless.potential.PairSpline(types=('1',), num_knots=1)
#test invalid mode
with self.assertRaises(ValueError):
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='val')
def test_from_array(self):
"""Test from_array method and knots generator"""
r_arr = [1,2,3]
u_arr = [9,4,1]
u_arr_diff = [5,3,1]
#test diff mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3)
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
for i,(r,k) in enumerate(s.knots(pair=('1','1'))):
self.assertAlmostEqual(r.value, r_arr[i])
self.assertAlmostEqual(k.value, u_arr_diff[i])
self.assertEqual(r.const, True)
if i == s.num_knots-1:
self.assertEqual(k.const, True)
else:
self.assertEqual(k.const, False)
#test value mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
for i,(r,k) in enumerate(s.knots(pair=('1','1'))):
self.assertAlmostEqual(r.value, r_arr[i])
self.assertAlmostEqual(k.value, u_arr[i])
self.assertEqual(r.const, True)
if i == s.num_knots-1:
self.assertEqual(k.const, True)
else:
self.assertEqual(k.const, False)
#test invalid r and u shapes
r_arr = [2,3]
with self.assertRaises(ValueError):
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
r_arr = [1,2,3]
u_arr = [1,2]
with self.assertRaises(ValueError):
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
def test_energy(self):
"""Test energy method"""
r_arr = [1,2,3]
u_arr = [9,4,1]
#test diff mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3)
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
u_actual = numpy.array([6.25,2.25,1])
u = s.energy(pair=('1','1'), r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(u, u_actual)
#test value mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
u_actual = numpy.array([6.25,2.25,1])
u = s.energy(pair=('1','1'), r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(u, u_actual)
#test PairSpline with 2 knots
s = relentless.potential.PairSpline(types=('1',), num_knots=2, mode='value')
s.from_array(pair=('1','1'), r=[1,2], u=[4,2])
u = s.energy(pair=('1','1'), r=1.5)
self.assertAlmostEqual(u, 3)
def test_force(self):
"""Test force method"""
r_arr = [1,2,3]
u_arr = [9,4,1]
#test diff mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3)
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
f_actual = numpy.array([5,3,0])
f = s.force(pair=('1','1'), r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(f, f_actual)
#test value mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
f_actual = numpy.array([5,3,0])
f = s.force(pair=('1','1'), r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(f, f_actual)
#test PairSpline with 2 knots
s = relentless.potential.PairSpline(types=('1',), num_knots=2, mode='value')
s.from_array(pair=('1','1'), r=[1,2], u=[4,2])
f = s.force(pair=('1','1'), r=1.5)
self.assertAlmostEqual(f, 2)
def test_derivative(self):
"""Test derivative method"""
r_arr = [1,2,3]
u_arr = [9,4,1]
#test diff mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3)
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
d_actual = numpy.array([1.125,0.625,0])
param = list(s.knots(('1','1')))[1][1]
d = s.derivative(pair=('1','1'), var=param, r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(d, d_actual)
#test value mode
s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
d_actual = numpy.array([0.75,0.75,0])
param = list(s.knots(('1','1')))[1][1]
d = s.derivative(pair=('1','1'), var=param, r=[1.5,2.5,3.5])
numpy.testing.assert_allclose(d, d_actual)
class test_Yukawa(unittest.TestCase):
"""Unit tests for relentless.potential.Yukawa"""
def test_init(self):
"""Test creation from data"""
y = relentless.potential.Yukawa(types=('1',))
coeff = relentless.potential.PairParameters(types=('1',), params=('epsilon','kappa','rmin','rmax','shift'))
for pair in coeff.pairs:
coeff[pair]['rmin'] = False
coeff[pair]['rmax'] = False
coeff[pair]['shift'] = False
self.assertCountEqual(y.coeff.types, coeff.types)
self.assertCountEqual(y.coeff.params, coeff.params)
def test_energy(self):
"""Test _energy method"""
y = relentless.potential.Yukawa(types=('1',))
#test scalar r
r_input = 0.5
u_actual = 1.5576016
u = y._energy(r=r_input, epsilon=1.0, kappa=0.5)
self.assertAlmostEqual(u, u_actual)
#test array r
r_input = numpy.array([0,1,1.5])
u_actual = numpy.array([numpy.inf,0.60653066,0.31491104])
u = y._energy(r=r_input, epsilon=1.0, kappa=0.5)
numpy.testing.assert_allclose(u, u_actual)
#test negative kappa
with self.assertRaises(ValueError):
u = y._energy(r=r_input, epsilon=1.0, kappa=-1.0)
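# Sanity check of the expected values above, using the screened-Coulomb (Yukawa) form
# u(r) = epsilon * exp(-kappa * r) / r (assumed to be what relentless implements):
#   r = 0.5, kappa = 0.5: exp(-0.25) / 0.5 ~ 1.5576016
#   r = 1.0, kappa = 0.5: exp(-0.5) ~ 0.60653066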
def test_force(self):
"""Test _force method"""
y = relentless.potential.Yukawa(types=('1',))
#test scalar r
r_input = 0.5
f_actual = 3.8940039
f = y._force(r=r_input, epsilon=1.0, kappa=0.5)
self.assertAlmostEqual(f, f_actual)
#test array r
r_input = numpy.array([0,1,1.5])
f_actual = numpy.array([numpy.inf,0.90979599,0.36739621])
f = y._force(r=r_input, epsilon=1.0, kappa=0.5)
numpy.testing.assert_allclose(f, f_actual)
#test negative kappa
with self.assertRaises(ValueError):
u = y._force(r=r_input, epsilon=1.0, kappa=-1.0)
def test_derivative(self):
"""Test _derivative method"""
y = relentless.potential.Yukawa(types=('1',))
#w.r.t. epsilon
#test scalar r
r_input = 0.5
d_actual = 1.5576016
d = y._derivative(param='epsilon', r=r_input, epsilon=1.0, kappa=0.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([0,1,1.5])
d_actual = numpy.array([numpy.inf,0.60653066,0.31491104])
d = y._derivative(param='epsilon', r=r_input, epsilon=1.0, kappa=0.5)
numpy.testing.assert_allclose(d, d_actual)
#w.r.t. kappa
#test scalar r
r_input = 0.5
d_actual = -0.77880078
d = y._derivative(param='kappa', r=r_input, epsilon=1.0, kappa=0.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([0,1,1.5])
d_actual = numpy.array([-1,-0.60653066,-0.47236655])
d = y._derivative(param='kappa', r=r_input, epsilon=1.0, kappa=0.5)
numpy.testing.assert_allclose(d, d_actual)
#test negative kappa
with self.assertRaises(ValueError):
u = y._derivative(param='kappa', r=r_input, epsilon=1.0, kappa=-1.0)
#test invalid param
with self.assertRaises(ValueError):
u = y._derivative(param='kapppa', r=r_input, epsilon=1.0, kappa=1.0)
class test_Depletion(unittest.TestCase):
"""Unit tests for relentless.potential.Depletion"""
def test_init(self):
"""Test creation from data"""
dp = relentless.potential.Depletion(types=('1','2'))
coeff = relentless.potential.PairParameters(types=('1','2'),
params=('P','sigma_i','sigma_j','sigma_d','rmin','rmax','shift'))
self.assertCountEqual(dp.coeff.types, coeff.types)
self.assertCountEqual(dp.coeff.params, coeff.params)
def test_cutoff_init(self):
"""Test creation of Depletion.Cutoff from data"""
#create object dependent on scalars
w = relentless.potential.Depletion.Cutoff(sigma_i=1.0, sigma_j=2.0, sigma_d=0.25)
self.assertAlmostEqual(w.value, 1.75)
self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
self.assertDictEqual({p:v.value for p,v in w.depends},
{'sigma_i':1.0, 'sigma_j':2.0, 'sigma_d':0.25})
#change parameter value
w.sigma_j.value = 4.0
self.assertAlmostEqual(w.value, 2.75)
self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
self.assertDictEqual({p:v.value for p,v in w.depends},
{'sigma_i':1.0, 'sigma_j':4.0, 'sigma_d':0.25})
#create object dependent on variables
a = relentless.variable.DesignVariable(value=1.0)
b = relentless.variable.DesignVariable(value=2.0)
c = relentless.variable.DesignVariable(value=0.25)
w = relentless.potential.Depletion.Cutoff(sigma_i=a, sigma_j=b, sigma_d=c)
self.assertAlmostEqual(w.value, 1.75)
self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
self.assertDictEqual({p:v for p,v in w.depends},
{'sigma_i':a, 'sigma_j':b, 'sigma_d':c})
#change parameter value
b.value = 4.0
self.assertAlmostEqual(w.value, 2.75)
self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
self.assertDictEqual({p:v for p,v in w.depends},
{'sigma_i':a, 'sigma_j':b, 'sigma_d':c})
def test_cutoff_derivative(self):
"""Test Depletion.Cutoff._derivative method"""
w = relentless.potential.Depletion.Cutoff(sigma_i=1.0, sigma_j=2.0, sigma_d=0.25)
#calculate w.r.t. sigma_i
dw = w._derivative('sigma_i')
self.assertEqual(dw, 0.5)
#calculate w.r.t. sigma_j
dw = w._derivative('sigma_j')
self.assertEqual(dw, 0.5)
#calculate w.r.t. sigma_d
dw = w._derivative('sigma_d')
self.assertEqual(dw, 1.0)
#invalid parameter calculation
with self.assertRaises(ValueError):
dw = w._derivative('sigma')
def test_energy(self):
"""Test _energy and energy methods"""
dp = relentless.potential.Depletion(types=('1',))
#test scalar r
r_input = 3
u_actual = -4.6786414
u = dp._energy(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(u, u_actual)
#test array r
r_input = numpy.array([1.75,4.25])
u_actual = numpy.array([-16.59621119,0])
u = dp._energy(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
numpy.testing.assert_allclose(u, u_actual)
#test negative sigma
with self.assertRaises(ValueError):
u = dp._energy(r=r_input, P=1, sigma_i=-1, sigma_j=1, sigma_d=1)
with self.assertRaises(ValueError):
u = dp._energy(r=r_input, P=1, sigma_i=1, sigma_j=-1, sigma_d=1)
with self.assertRaises(ValueError):
u = dp._energy(r=r_input, P=1, sigma_i=1, sigma_j=1, sigma_d=-1)
#test energy outside of low/high bounds
dp.coeff['1','1'].update(P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
r_input = numpy.array([1,5])
u_actual = numpy.array([-25.7514468,0])
u = dp.energy(pair=('1','1'), r=r_input)
numpy.testing.assert_allclose(u, u_actual)
self.assertAlmostEqual(dp.coeff['1','1']['rmax'].value, 4.25)
def test_force(self):
"""Test _force and force methods"""
dp = relentless.potential.Depletion(types=('1',))
#test scalar r
r_input = 3
f_actual = -7.0682426
f = dp._force(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(f, f_actual)
#test array r
r_input = numpy.array([1.75,4.25])
f_actual = numpy.array([-11.54054444,0])
f = dp._force(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
numpy.testing.assert_allclose(f, f_actual)
#test negative sigma
with self.assertRaises(ValueError):
f = dp._force(r=r_input, P=1, sigma_i=-1, sigma_j=1, sigma_d=1)
with self.assertRaises(ValueError):
f = dp._force(r=r_input, P=1, sigma_i=1, sigma_j=-1, sigma_d=1)
with self.assertRaises(ValueError):
f = dp._force(r=r_input, P=1, sigma_i=1, sigma_j=1, sigma_d=-1)
#test force outside of low/high bounds
dp.coeff['1','1'].update(P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
r_input = numpy.array([1,5])
f_actual = numpy.array([-12.5633027,0])
f = dp.force(pair=('1','1'), r=r_input)
numpy.testing.assert_allclose(f, f_actual)
self.assertAlmostEqual(dp.coeff['1','1']['rmax'].value, 4.25)
def test_derivative(self):
"""Test _derivative and derivative methods"""
dp = relentless.potential.Depletion(types=('1',))
#w.r.t. P
#test scalar r
r_input = 3
d_actual = -4.6786414
d = dp._derivative(param='P', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([1.75,4.25])
d_actual = numpy.array([-16.59621119,0])
import umap
import deepview.Stochastic_Embedding as stocemb
import deepview.config as defaults
import numpy as np
import abc
def init_umap(config):
n_neighbors = config.get('n_neighbors', defaults.n_neighbors)
min_dist = config.get('min_dist', defaults.min_dist)
spread = config.get('spread', defaults.spread)
random_state = config.get('random_state', defaults.random_state)
verbose = config.get('verbose', defaults.verbose)
return umap.UMAP(metric='precomputed', n_neighbors=n_neighbors,
random_state=random_state, spread=spread, min_dist=min_dist, verbose=verbose)
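# Hedged usage sketch (not part of the original module): because the metric is
# 'precomputed', the returned mapper expects a pairwise distance matrix rather
# than raw feature vectors, e.g.
#   mapper = init_umap({'n_neighbors': 30, 'min_dist': 0.1})
#   embedded = mapper.fit_transform(distance_matrix)  # (n, n) distances -> (n, 2)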
def init_inv_umap(config):
neighbors = config.get('neighbor_frac', defaults.neighbor_frac)
centroids = config.get('centroid_frac', defaults.centroid_frac)
smoothing_epochs = config.get('smoothing_epochs',
defaults.smoothing_epochs)
smoothing_neighbors = config.get('smoothing_neighbors',
defaults.smoothing_neighbors)
max_iter = config.get('max_iter', defaults.max_iter)
a_scale = config.get('a', defaults.a)
b = config.get('b', defaults.b)
return InvMapper(neighbors, centroids, smoothing_epochs,
smoothing_neighbors, max_iter, a_scale, b)
class Mapper(abc.ABC):
def __init__(self):
pass
def __call__(self, x):
return self.transform(x)
@abc.abstractclassmethod
def transform(self, x):
pass
@abc.abstractclassmethod
def fit(self, x, y=None):
pass
class InvMapper(stocemb.StochasticEmbedding):
def __init__(self, neighbors, centroids, smoothing_epochs, smoothing_neighbors,
max_iter, a, b):
super(InvMapper, self).__init__()
self.neighbors = neighbors
self.centroids = centroids
self.max_iter = max_iter
self.a_scale = a
self.b = b
self.n_smoothing_epochs = smoothing_epochs
self.n_smoothing_neighbors = smoothing_neighbors
def fit(self, x, y):
'''
x : embedded,
y : x
'''
self.data_shape = y.shape[1:]
flat_dim = np.prod(y.shape[1:])
y_flat = np.reshape(y, [-1, flat_dim])
x_min = np.min(x, axis=0)
x_max = np.max(x, axis=0)
av_range = np.mean(x_max - x_min)
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as pyplt
from scipy.sparse import csr_matrix, linalg
from numpy.random import default_rng
rng = default_rng(1)
class ElasticTriObj:
G = np.array([[1,0], [0, 1], [-1, -1]])
"""
Mostly copied from wandb client code
Modified "next_sample" code to do the following:
-accepts a 'failure_cost' argument
-if failure cost 'c' is nonzero, modifies expected improvement of each
sample according to:
e' = p e / (p (1-c) + c)
where 'p' is probability of success and 'e' is unmodified expected improvement
-returns expected improvements for whole sample
Bayesian Search
Check out https://arxiv.org/pdf/1206.2944.pdf
for explanation of bayesian optimization
We do bayesian optimization and handle the cases where some X values are integers
as well as the case where X is very large.
"""
import numpy as np
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.gaussian_process.kernels import Matern
#import scipy.stats as stats
import math
#from wandb.util import get_module
#from wandb.sweeps.base import Search
#from wandb.sweeps.params import HyperParameter, HyperParameterSet
#sklearn.gaussian = get_module('sklearn.gaussian_process')
#sklearn.linear = get_module('sklearn.linear_model')
#sklearn.svm = get_module('sklearn.svm')
#sklearn.discriminant = get_module('sklearn.discriminant_analysis')
#scipy.stats = get_module('scipy.stats')
import sklearn.gaussian_process as gaussian
import sklearn.linear_model as linear_model
import sklearn.svm as svm
import sklearn.discriminant_analysis as discriminant
import scipy.stats
def fit_normalized_gaussian_process(X, y, nu=1.5):
"""
We fit a gaussian process but first subtract the mean and divide by stddev.
To undo at prediction time, call y_pred = gp.predict(X) * y_stddev + y_mean
"""
gp = gaussian.GaussianProcessRegressor(
kernel=gaussian.kernels.Matern(nu=nu), n_restarts_optimizer=2, alpha=0.0000001, random_state=2
)
if len(y) == 1:
y = np.array(y)
y_mean = y[0]
y_stddev = 1
else:
y_mean = np.mean(y)
y_stddev = np.std(y) + 0.0001
y_norm = (y - y_mean) / y_stddev
gp.fit(X, y_norm)
return gp, y_mean, y_stddev
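# Hedged usage sketch for the helper above (X, y, X_new are illustrative arrays):
#   gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y)
#   y_pred_norm, y_std_norm = gp.predict(X_new, return_std=True)
#   y_pred = y_pred_norm * y_stddev + y_mean   # undo the normalization
#   y_err = y_std_norm * y_stddev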
def train_logistic_regression(X, y):
lr = linear_model.LogisticRegression()
lr.fit(X, y.astype(int))
return lambda X : lr.predict_proba(X)[...,1], 0, 1
def train_rbf_svm(X, y):
svc = svm.SVC(probability=True)
svc.fit(X, y.astype(int))
return lambda X : svc.predict_proba(X)[...,1], 0, 1
def train_qda(X,y):
qda = discriminant.QuadraticDiscriminantAnalysis()
qda.fit(X, y.astype(int))
return lambda X : qda.predict_proba(X)[...,1], 0, 1
def sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
def random_sample(X_bounds, num_test_samples):
num_hyperparameters = len(X_bounds)
test_X = np.empty((num_test_samples, num_hyperparameters))
for ii in range(num_test_samples):
for jj in range(num_hyperparameters):
if type(X_bounds[jj][0]) == int:
assert (type(X_bounds[jj][1]) == int)
test_X[ii, jj] = np.random.randint(
X_bounds[jj][0], X_bounds[jj][1])
else:
test_X[ii, jj] = np.random.uniform()
# occiput.io
# <NAME>
# Martinos Center for Biomedical Imaging, Harvard University/MGH, Boston
# Aalto University, Finland
# 2013-2015
# Code to generate subsets for OSEM (ordered subsets expectation maximization) and algorithms of that sort, such
# as stochastic optimization.
from __future__ import absolute_import, print_function
__all__ = ["SubsetGenerator"]
from numpy import uint32, ones, zeros
from numpy.random import randint
from ...Core.Errors import *
class SubsetGenerator:
"""This object has the machinery to generate subsets of the axial and longitudinal directions for PET
reconstruction (e.g. OSEM algorithm).
It implements various algorithms to create the subsets. """
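# Hedged usage sketch (values are illustrative): for data binned into
# 5 azimuthal x 120 axial angles, an ordered-axial subset of size 30
# activates every 4th axial angle for all azimuthal angles:
#   gen = SubsetGenerator(N_azimuthal=5, N_axial=120)
#   mask = gen.new_subset(mode='ordered_axial', subset_size=30)  # shape (5, 120)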
def __init__(self, N_azimuthal, N_axial):
self._N_axial = N_axial
self._N_azimu = N_azimuthal
self._index = 0
def new_subset(self, mode, subset_size, azimuthal_range=None):
"""Returns a new subset. """
if mode == 'random':
return self._subsets_random_no_replacement(subset_size, azimuthal_range)
elif mode == 'random_axial':
return self._subsets_random_axial(subset_size, azimuthal_range)
elif mode == 'ordered_axial':
return self._subsets_ordered_axial(subset_size, azimuthal_range)
else:
raise UnexpectedParameter("'mode' parameter %s not recognised." % str(mode))
def all_active(self):
"""Returns full set. """
return ones((self._N_azimu, self._N_axial), dtype=uint32, order="C")
def _subsets_random_no_replacement(self, subset_size, azimuthal_range=None):
"""Generates subsets randomly - no replacement. """
if subset_size is None:
return self.all_active()
if subset_size >= self._N_axial * self._N_azimu:
return self.all_active()
M = zeros((self._N_azimu, self._N_axial), dtype=uint32, order="C")
n = 0
while n < subset_size:
active_axial = randint(self._N_axial)
if azimuthal_range is None:
active_azimu = randint(self._N_azimu)
else:
active_azimu = azimuthal_range[randint(len(azimuthal_range))]
if M[active_azimu, active_axial] == 0:
M[active_azimu, active_axial] = 1
n += 1
return M
def _subsets_ordered_axial(self, subset_size, azimuthal_range=None):
"""Generates ordered subsets; use all azimuthal angles, subsample axially. """
if subset_size is None:
return self.all_active()
if subset_size >= self._N_axial * self._N_azimu:
return self.all_active()
M = zeros((self._N_azimu, self._N_axial), dtype=uint32, order="C")
for i in range(self._index, self._N_axial, self._N_axial // subset_size):
if azimuthal_range is None:
M[:, i] = 1
else:
M[azimuthal_range, i] = 1
self._index += 1
if self._index == self._N_axial / subset_size:
self._index = 0
return M
def _subsets_random_axial(self, subset_size, azimuthal_range=None):
"""Generates random subsets; use all azimuthal angles, subsample axially. """
if subset_size is None:
return self.all_active()
if subset_size >= self._N_axial * self._N_azimu:
return self.all_active()
M = zeros((self._N_azimu, self._N_axial), dtype=uint32, order="C")
n = 0
while n < subset_size:
active_axial = randint(self._N_axial)
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from ..spectrum import ArraySpectralElement, ArraySourceSpectrum
class TestFlip(object):
def setup_class(self):
self.waveup = np.arange(10000, 10100, 10)
self.wavedown = self.waveup[::-1]
self.t_up = np.arange(10) + 5
self.t_flip = self.t_up.copy()[::-1]
@pytest.mark.parametrize(
'cls', [ArraySpectralElement, ArraySourceSpectrum])
def test_flip(self, cls):
up = cls(self.waveup, self.t_up)
down = cls(self.wavedown, self.t_up[::-1])
assert_array_equal(up(self.waveup), self.t_up)
assert_array_equal(up(self.wavedown), self.t_flip)
assert_array_equal(down(self.waveup), self.t_up)
assert_array_equal(down(self.wavedown), self.t_flip)
class TestNumpyInterp(object):
def setup_class(self):
self.Y = np.arange(10) + 5
def test1(self):
A = np.arange(10)
X = np.arange(10)
ans = np.interp(A, X, self.Y)
assert_array_equal(ans, self.Y)
def test2(self):
A = np.arange(10)[::-1]
X = np.arange(10)[::-1]
ans = np.interp(A[::-1], X[::-1], self.Y[::-1])
assert_array_equal(ans, self.Y[::-1])
def test3(self):
A = np.arange(10)
X = np.arange(10)[::-1]
ans = np.interp(A, X[::-1], self.Y[::-1])
assert_array_equal(ans, self.Y[::-1])
import cv2
import numpy as np
import alglib.processing as processing
lower1 = np.array([0, 30, 0])
import numpy as np
def autocorr(x):
"""
Computes the (normalised) auto-correlation function of a
one-dimensional sequence of numbers.
Utilises the numpy correlate function that is based on an efficient
convolution implementation.
Inputs:
x - one dimensional numpy array
Outputs:
Vector of autocorrelation values for a lag from zero to max possible
"""
# normalise, compute norm
xunbiased = x - np.mean(x)
xnorm = np.sum(xunbiased ** 2)
# convolve with itself
acor = np.correlate(xunbiased, xunbiased, mode='same')
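# Hedged note (not from the original source): a common way to finish this
# computation is to keep only the non-negative lags and normalise by the norm,
#   acor = acor[len(acor) // 2:] / xnorm
# so that the zero-lag autocorrelation equals 1, as the docstring describes.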
import numpy as np
import ot3D
import matplotlib.pyplot as plt
import scipy.sparse as sps
class LaguerreTri():
def __init__(self):
self.__Tri = ot3D.OTproblem()
def printCGALVERSION(self) :
self.__Tri.printCGALVERSION()
def setCoordinates(self,position):
self.__Tri.setLaguerrePosition(position[:,0],position[:,1],position[:,2])
def setPolyLine(self,polyLine,weights):
self.nb_polyLinept=polyLine.shape[0]
self.__Tri.setPolyline(polyLine[:,0],polyLine[:,1],polyLine[:,2],weights)
def setPolyLineNew(self,polyLine,weights):
self.nb_polyLinept=polyLine.shape[0]//2
self.__Tri.setPolylineNew(polyLine[:,0],polyLine[:,1],polyLine[:,2],weights)
def getPolyline(self,i):
return np.array(self.__Tri.getContributorLine(i))
def compute(self,psi,Hessian=False):
self.__Tri.setLaguerrePsi(psi)
n = psi.shape[0]
self.n = psi.shape[0]
mass = np.zeros(n)
cost = np.zeros(n)
Bar = np.zeros(3*n)
self.__Tri.computeAll(mass,cost,Bar)
Bar = np.reshape(Bar,(n,3),order='C')
grad = 1./float(n) - mass
if Hessian :
R = self.__Tri.computeHessian()
Rsparse = sps.coo_matrix((np.array(R.val),(np.array(R.row),np.array(R.col))),shape=(R.getDimRow(),R.getDimCol()))
return mass,Bar,cost,grad,Rsparse
else :
return mass,Bar,cost,grad
def plotEdges(self,ax,i,dim=3) :
edges=self.__Tri.getadjEdge(i)
if dim==3 :
for e in edges :
ax.plot([e[0],e[3]],[e[1],e[4]],[e[2],e[5]],c='#959595')
else :
for e in edges :
ax.plot([e[0],e[3]],[e[1],e[4]],c='#959595')
def plotList(self,l,i,dim=3) :
edges=self.__Tri.getadjEdge(i)
if dim==3 :
for e in edges :
l.append([[e[0],e[3]],[e[1],e[4]],[e[2],e[5]]])
else :
for e in edges :
l.append([[e[0],e[3]],[e[1],e[4]]])
return l
def plotIntersection(self,ax,i,dim=3) :
inter=self.__Tri.getCutPosition(i)
if dim==3 :
for e in inter :
ax.scatter(e[0],e[1],e[2],c='black')
else :
for e in inter :
ax.scatter(e[0],e[1],c='black')
def derivativePol(self) :
dmass = np.zeros(6*self.nb_polyLinept)
dcost = np.zeros(6*self.nb_polyLinept)
self.__Tri.computeDeriv(dmass,dcost)
dmass = np.reshape(dmass,(self.nb_polyLinept,6),order='C')
dcost = np.reshape(dcost,(self.nb_polyLinept,6),order='C')
return dmass,dcost
def derivativePolTotal(self) :
dmass = np.zeros(6*self.nb_polyLinept)
dcost = np.zeros(6*self.nb_polyLinept)
drho = np.zeros(6*self.nb_polyLinept)
self.__Tri.computeDerivTotal(dmass,dcost,drho)
dmass = np.reshape(dmass,(self.nb_polyLinept,6),order='C')
dcost = np.reshape(dcost,(self.nb_polyLinept,6),order='C')
drho = np.reshape(drho ,(self.nb_polyLinept,6),order='C')
return dmass,dcost,drho
def computePolyinfo(self) :
massSeen = np.zeros(self.nb_polyLinept)
costPaid = np.zeros(self.nb_polyLinept)
self.__Tri.computePolyInfo(massSeen,costPaid)
return massSeen, costPaid
def setMassDiracs(self,w):
self.__Tri.setMassDiracs(w)
def perfomOptimisationBFGS(self,gradTol,nMaxIter,w1,w2,wMaxIter,memSize=15,parallelism=False,nbThreads=-1):
self.__Tri.perfomOptimPsiBFGS(gradTol,nMaxIter,w1,w2,wMaxIter,memSize,parallelism,nbThreads)
psiOpt = np.zeros(self.n)
self.__Tri.getOptimizedPsi(psiOpt)
return psiOpt,self.__Tri.getOptimizationCF(),self.__Tri.getOptimizationCV(),self.__Tri.getOptimizationtime()
def perfomOptimisationNew(self,gradTol,nMaxIter,w1,w2,wMaxIter,memSize=15,leven=1e-5,psi=None,parallelism=False,nbThreads=-1):
if psi is not None:
self.__Tri.perfomOptimPsiBFGSUnleashDaBeastRestart(psi,gradTol,nMaxIter,w1,w2,wMaxIter,memSize,leven,parallelism,nbThreads)
else:
self.__Tri.perfomOptimPsiBFGSUnleashDaBeast(gradTol,nMaxIter,w1,w2,wMaxIter,memSize,leven,parallelism,nbThreads)
psiOpt = np.zeros(self.n)
# MIT License
#
# Copyright (c) 2019-2020 Tskit Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Python implementation of the Li and Stephens algorithms.
"""
import itertools
import unittest
import msprime
import numpy as np
import pytest
import _tskit # TMP
import tskit
from tests import tsutil
def in_sorted(values, j):
# Take advantage of the fact that the numpy array is sorted.
ret = False
index = np.searchsorted(values, j)
if index < values.shape[0]:
ret = values[index] == j
return ret
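# Hedged usage sketch: `values` must be sorted ascending, e.g.
#   in_sorted(np.array([1, 3, 7]), 3)  -> True
#   in_sorted(np.array([1, 3, 7]), 4)  -> False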
def ls_forward_matrix_naive(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm using Python loops.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
S = np.zeros(m)
f = np.zeros(n) + 1 / n
for el in range(0, m):
for j in range(n):
# NOTE Careful with the difference between this expression and
# the Viterbi algorithm below. This depends on the different
# normalisation approach.
p_t = f[j] * (1 - rho[el]) + rho[el] / n
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
f[j] = p_t * p_e
S[el] = np.sum(f)
# TODO need to handle the 0 case.
assert S[el] > 0
f /= S[el]
F[el] = f
return F, S
def ls_viterbi_naive(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS Viterbi algorithm using Python loops.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
L = np.ones(n)
T = [set() for _ in range(m)]
T_dest = np.zeros(m, dtype=int)
for el in range(m):
# The calculation below is undefined otherwise.
if len(alleles[el]) > 1:
assert mu[el] <= 1 / (len(alleles[el]) - 1)
L_next = np.zeros(n)
for j in range(n):
# NOTE Careful with the difference between this expression and
# the Forward algorithm above. This depends on the different
# normalisation approach.
p_no_recomb = L[j] * (1 - rho[el] + rho[el] / n)
p_recomb = rho[el] / n
if p_no_recomb > p_recomb:
p_t = p_no_recomb
else:
p_t = p_recomb
T[el].add(j)
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
L_next[j] = p_t * p_e
L = L_next
j = np.argmax(L)
T_dest[el] = j
if L[j] == 0:
assert mu[el] == 0
raise ValueError(
"Trying to match non-existent allele with zero mutation rate"
)
L /= L[j]
P = np.zeros(m, dtype=int)
P[m - 1] = T_dest[m - 1]
for el in range(m - 1, 0, -1):
j = P[el]
if j in T[el]:
j = T_dest[el - 1]
P[el - 1] = j
return P
def ls_viterbi_vectorised(h, alleles, G, rho, mu):
# We must have a non-zero mutation rate, or we'll end up with
# division by zero problems.
# assert np.all(mu > 0)
m, n = G.shape
alleles = check_alleles(alleles, m)
V = np.ones(n)
T = [None for _ in range(m)]
max_index = np.zeros(m, dtype=int)
for site in range(m):
# Transition
p_neq = rho[site] / n
p_t = (1 - rho[site] + rho[site] / n) * V
recombinations = np.where(p_neq > p_t)[0]
p_t[recombinations] = p_neq
T[site] = recombinations
# Emission
p_e = np.zeros(n) + mu[site]
index = G[site] == h[site]
if h[site] == tskit.MISSING_DATA:
# Missing data is considered equal to everything
index[:] = True
p_e[index] = 1 - (len(alleles[site]) - 1) * mu[site]
V = p_t * p_e
# Normalise
max_index[site] = np.argmax(V)
# print(site, ":", V)
if V[max_index[site]] == 0:
assert mu[site] == 0
raise ValueError(
"Trying to match non-existent allele with zero mutation rate"
)
V /= V[max_index[site]]
# Traceback
P = np.zeros(m, dtype=int)
site = m - 1
P[site] = max_index[site]
while site > 0:
j = P[site]
if in_sorted(T[site], j):
j = max_index[site - 1]
P[site - 1] = j
site -= 1
return P
def check_alleles(alleles, num_sites):
"""
Checks the specified allele list and returns a list of lists
of alleles of length num_sites.
If alleles is a 1D list of strings, assume that this list is used
for each site and return num_sites copies of this list.
Otherwise, raise a ValueError if alleles is not a list of length
num_sites.
"""
if isinstance(alleles[0], str):
return [alleles for _ in range(num_sites)]
if len(alleles) != num_sites:
raise ValueError("Malformed alleles list")
return alleles
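# Hedged usage sketch for check_alleles (values are illustrative):
#   check_alleles(["A", "C"], 3)               -> [["A", "C"], ["A", "C"], ["A", "C"]]
#   check_alleles([["A", "C"], ["G", "T"]], 2) -> returned unchanged
#   check_alleles([["A", "C"]], 2)             -> raises ValueError("Malformed alleles list")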
def ls_forward_matrix(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm using numpy vectorisation.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
S = np.zeros(m)
f = np.zeros(n) + 1 / n
p_e = np.zeros(n)
for el in range(0, m):
p_t = f * (1 - rho[el]) + rho[el] / n
eq = G[el] == h[el]
if h[el] == tskit.MISSING_DATA:
# Missing data is equal to everything
eq[:] = True
p_e[:] = mu[el]
p_e[eq] = 1 - (len(alleles[el]) - 1) * mu[el]
f = p_t * p_e
S[el] = np.sum(f)
# TODO need to handle the 0 case.
assert S[el] > 0
f /= S[el]
F[el] = f
return F, S
def forward_matrix_log_proba(F, S):
"""
Given the specified forward matrix and scaling factor array, return the
overall log probability of the input haplotype.
"""
return np.sum(np.log(S)) - np.log(np.sum(F[-1]))
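# Hedged usage sketch: F and S come from ls_forward_matrix (shapes (m, n) and (m,)),
# so the overall log-likelihood of a haplotype h against the panel G is
#   F, S = ls_forward_matrix(h, alleles, G, rho, mu)
#   log_p = forward_matrix_log_proba(F, S)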
def ls_forward_matrix_unscaled(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
f = np.zeros(n) + 1 / n
for el in range(0, m):
s = np.sum(f)
for j in range(n):
p_t = f[j] * (1 - rho[el]) + s * rho[el] / n
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
f[j] = p_t * p_e
F[el] = f
return F
# TODO change this to use the log_proba function below.
def ls_path_probability(h, path, G, rho, mu):
"""
Returns the probability of the specified path through the genotypes for the
specified haplotype.
"""
# Assuming num_alleles = 2
assert rho[0] == 0
m, n = G.shape
# TODO It's not entirely clear why we're starting with a proba of 1 / n for the
# model. This was done because it made it easier to compare with an existing
# HMM implementation. Need to figure this one out when writing up.
proba = 1 / n
for site in range(0, m):
pe = mu[site]
if h[site] == G[site, path[site]] or h[site] == tskit.MISSING_DATA:
pe = 1 - mu[site]
pt = rho[site] / n
if site == 0 or path[site] == path[site - 1]:
pt = 1 - rho[site] + rho[site] / n
proba *= pt * pe
return proba
def ls_path_log_probability(h, path, alleles, G, rho, mu):
"""
Returns the log probability of the specified path through the genotypes for the
specified haplotype.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
# TODO It's not entirely clear why we're starting with a proba of 1 / n for the
# model. This was done because it made it easier to compare with an existing
# HMM implementation. Need to figure this one out when writing up.
log_proba = np.log(1 / n)
for site in range(0, m):
if len(alleles[site]) > 1:
assert mu[site] <= 1 / (len(alleles[site]) - 1)
pe = mu[site]
if h[site] == G[site, path[site]] or h[site] == tskit.MISSING_DATA:
pe = 1 - (len(alleles[site]) - 1) * mu[site]
assert 0 <= pe <= 1
pt = rho[site] / n
if site == 0 or path[site] == path[site - 1]:
pt = 1 - rho[site] + rho[site] / n
assert 0 <= pt <= 1
log_proba += np.log(pt)
import sys
import numpy as np
import matplotlib.pyplot as plt
args = sys.argv
if len(args) != 4:
print('Error')
quit()
f = open(args[1],'r')
window = int(args[2])
step = int(args[3])
sequense = ""
for line in f:
if line[0] != '>':
sequense += line.rstrip('\n')
gnum = sequense.count('G')
cnum = sequense.count('C')
##print(sequense)
print('GC content: {}'.format((gnum+cnum)/len(sequense)))
i = 0
gc = []
position = []
while i < len(sequense):
gnum = sequense[i:i+window].count('G')
cnum = sequense[i:i+window].count('C')
gc.append((gnum+cnum)/window)
print((gnum+cnum)/window)
position.append(i)
i += step
left = np.array(position)
height = np.array(gc)
#!/usr/bin/env python
# coding: utf-8
# # Actions and Orbit caluclation with MC sampling for GALAH DR3
#
# ## Author: <NAME>
#
# ### History:
# 181011 SB Created
# 190222 SB Included sampling with 5D covariance matrix and fixed galpy coordinate transformation for J2015.5 in ICRS
# 201001 SB Change to McMillan17 potential, including different RO and VO
# # What information you need
#
# ra, dec, pmra, pmdec from Gaia DR2
#
# distance:
# if you want to use parallax: parallax and parallax_uncertainty
# if you want to use covariances: covariance entries from Gaia DR2
# if you want to use Bailer-Jones distances: r_est, r_lo, r_hi
# if you want to use BSTEP: dist_gbm, e_dist_gbm
#
# vlos:
# if you want to use rv_galah: rv_galah, e_rv_galah
# if you want to use rv_gaia: rv_gaia, e_rv_gaia
# In[ ]:
# Preamble for notebook
# Compatibility with Python 3
from __future__ import (absolute_import, division, print_function)
try:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format='retina'")
except:
pass
# Start timer
import time
start = time.time()
# Basic packages
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
import os
import sys
import glob
import pickle
import collections
import pandas
# Packages to work with FITS and (IDL) SME.out files
import astropy.io.fits as pyfits
import astropy.table as table
import astropy.coordinates as coord
import astropy.units as u
import math
from astropy.table import Table, hstack, vstack
from scipy.io.idl import readsav
# Matplotlib and associated packages for plotting
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.transforms import Bbox,TransformedBbox
from matplotlib.image import BboxImage
from matplotlib.legend_handler import HandlerBase
from matplotlib._png import read_png
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
import matplotlib.colors as colors
params = {
'font.family' : 'sans',
'font.size' : 17,
'axes.labelsize' : 20,
'ytick.labelsize' : 16,
'xtick.labelsize' : 16,
'legend.fontsize' : 20,
'text.usetex' : True,
'text.latex.preamble': [r'\usepackage{upgreek}', r'\usepackage{amsmath}'],
}
plt.rcParams.update(params)
_parula_data = [[0.2081, 0.1663, 0.5292],
[0.2116238095, 0.1897809524, 0.5776761905],
[0.212252381, 0.2137714286, 0.6269714286],
[0.2081, 0.2386, 0.6770857143],
[0.1959047619, 0.2644571429, 0.7279],
[0.1707285714, 0.2919380952, 0.779247619],
[0.1252714286, 0.3242428571, 0.8302714286],
[0.0591333333, 0.3598333333, 0.8683333333],
[0.0116952381, 0.3875095238, 0.8819571429],
[0.0059571429, 0.4086142857, 0.8828428571],
[0.0165142857, 0.4266, 0.8786333333],
[0.032852381, 0.4430428571, 0.8719571429],
[0.0498142857, 0.4585714286, 0.8640571429],
[0.0629333333, 0.4736904762, 0.8554380952],
[0.0722666667, 0.4886666667, 0.8467],
[0.0779428571, 0.5039857143, 0.8383714286],
[0.079347619, 0.5200238095, 0.8311809524],
[0.0749428571, 0.5375428571, 0.8262714286],
[0.0640571429, 0.5569857143, 0.8239571429],
[0.0487714286, 0.5772238095, 0.8228285714],
[0.0343428571, 0.5965809524, 0.819852381],
[0.0265, 0.6137, 0.8135],
[0.0238904762, 0.6286619048, 0.8037619048],
[0.0230904762, 0.6417857143, 0.7912666667],
[0.0227714286, 0.6534857143, 0.7767571429],
[0.0266619048, 0.6641952381, 0.7607190476],
[0.0383714286, 0.6742714286, 0.743552381],
[0.0589714286, 0.6837571429, 0.7253857143],
[0.0843, 0.6928333333, 0.7061666667],
[0.1132952381, 0.7015, 0.6858571429],
[0.1452714286, 0.7097571429, 0.6646285714],
[0.1801333333, 0.7176571429, 0.6424333333],
[0.2178285714, 0.7250428571, 0.6192619048],
[0.2586428571, 0.7317142857, 0.5954285714],
[0.3021714286, 0.7376047619, 0.5711857143],
[0.3481666667, 0.7424333333, 0.5472666667],
[0.3952571429, 0.7459, 0.5244428571],
[0.4420095238, 0.7480809524, 0.5033142857],
[0.4871238095, 0.7490619048, 0.4839761905],
[0.5300285714, 0.7491142857, 0.4661142857],
[0.5708571429, 0.7485190476, 0.4493904762],
[0.609852381, 0.7473142857, 0.4336857143],
[0.6473, 0.7456, 0.4188],
[0.6834190476, 0.7434761905, 0.4044333333],
[0.7184095238, 0.7411333333, 0.3904761905],
[0.7524857143, 0.7384, 0.3768142857],
[0.7858428571, 0.7355666667, 0.3632714286],
[0.8185047619, 0.7327333333, 0.3497904762],
[0.8506571429, 0.7299, 0.3360285714],
[0.8824333333, 0.7274333333, 0.3217],
[0.9139333333, 0.7257857143, 0.3062761905],
[0.9449571429, 0.7261142857, 0.2886428571],
[0.9738952381, 0.7313952381, 0.266647619],
[0.9937714286, 0.7454571429, 0.240347619],
[0.9990428571, 0.7653142857, 0.2164142857],
[0.9955333333, 0.7860571429, 0.196652381],
[0.988, 0.8066, 0.1793666667],
[0.9788571429, 0.8271428571, 0.1633142857],
[0.9697, 0.8481380952, 0.147452381],
[0.9625857143, 0.8705142857, 0.1309],
[0.9588714286, 0.8949, 0.1132428571],
[0.9598238095, 0.9218333333, 0.0948380952],
[0.9661, 0.9514428571, 0.0755333333],
[0.9763, 0.9831, 0.0538]]
parula = ListedColormap(_parula_data, name='parula')
parula_zero = _parula_data[0]
parula_0 = ListedColormap(_parula_data, name='parula_0')
parula_0.set_bad((1,1,1))
parula_r = ListedColormap(_parula_data[::-1], name='parula_r')
willi_blau = [0.0722666667, 0.4886666667, 0.8467]
# In[ ]:
debug = False
# ### Galpy initialization
#
# We are using the McMillan17 potential from McMillan, 2017, MNRAS, 465, 76.
# Contrary to galpy, its normalisation parameters are:
# r_gc = 8.21 kpc (galpy: 8.0 kpc, Gravity Collaboration, 2018, A&A, 615, 15: 8.178 kpc).
# v_gc = 233.1 km/s (galpy: 220 km/s)
# In[ ]:
import galpy
#from galpy.potential import MWPotential2014 as pot
from galpy.potential.mwpotentials import McMillan17 as pot
from galpy.util.bovy_conversion import get_physical
from galpy.actionAngle import actionAngleStaeckel
from galpy.orbit import Orbit
# Reference values
#r_galactic_centre = 8.178*u.kpc # Gravity Collaboration, 2019, A&A, 625, 10
r_galactic_centre = 8.21*u.kpc # McMillan Potential, 2017
z_galactic_plane = 25.0*u.pc # Bland-Hawthorn & Gerhard, 2016, ARA&A, 54, 529
print('Reference frame:')
print('R_GC = '+str(r_galactic_centre)+' (McMillan, 2017, MNRAS, 465, 76)')
print('phi_GC = '+str(0*u.rad))
print('z_GC = '+str(z_galactic_plane)+' (Bland-Hawthorn & Gerhard, 2016, ARA&A, 54, 529)')
v_total_sun = (np.tan(6.379*u.mas)*r_galactic_centre/u.yr).to(u.km/u.s) # pm_l by Reid & Brunthaler 2004, ApJ, 616, 872
print('V_total_sun: = '+"{:.2f}".format(v_total_sun)+' (Reid & Brunthaler 2004, ApJ, 616, 872)')
v_peculiar = [11.1, 15.17, 7.25]*u.km/u.s # U and W from Schoenrich, Binney, Dehnen, 2010, MNRAS, 403, 1829, V so that V = V_total-V_sun
print('V_peculiar = ',(v_peculiar),' (U and W from Schoenrich, Binney, Dehnen, 2010, MNRAS, 403, 1829)')
print('V-component of V_peculiar = 15.17 km/s, instead of 12.24 km/s by Schoenrich et al. (2010), for matching v_circular')
v_circular = np.round(v_total_sun-v_peculiar[1],1)
print('V_circular = ',(v_circular),' (McMillan, 2017, MNRAS, 465, 76)')
aAS = actionAngleStaeckel(
pot = pot, #potential
delta = 0.45, #focal length of confocal coordinate system
c = True #use C code (for speed)
)
#(RA = 17:45:37.224 h:m:s, Dec = −28:56:10.23 deg) (Reid& Brunthaler 2004)
# ### Let's get the Solar values
# In[ ]:
calculate_sun = True
if calculate_sun:
sun = dict()
# Create the Orbit instance
o = Orbit(
#ra, dec, dist, pm_ra, pm_dec, v_los
vxvv=[0.*u.deg,0.*u.deg,0.*u.kpc,0.*u.mas/u.yr, 0.*u.mas/u.yr,0.*u.km/u.s],
ro=r_galactic_centre,
vo=v_circular,
zo=z_galactic_plane,
solarmotion=[-11.1, 15.17, 7.25]*u.km/u.s,
#solarmotion='schoenrich',
radec=True
)
#Galactocentric coordinates:
sun['X_XYZ'] = o.helioX()#*u.kpc
sun['Y_XYZ'] = o.helioY()#*u.kpc
sun['Z_XYZ'] = o.helioZ()#*u.kpc
sun['U_UVW'] = o.U()#*u.km/u.s
sun['V_UVW'] = o.V()#*u.km/u.s
sun['W_UVW'] = o.W()#*u.km/u.s
sun['R_Rzphi'] = o.R()#*u.kpc
sun['phi_Rzphi'] = o.phi()#*u.rad
sun['z_Rzphi'] = o.z()#*u.kpc
sun['vR_Rzphi'] = o.vR()#*u.km/u.s
sun['vphi_Rzphi'] = o.vphi()#*u.km/u.s
sun['vz_Rzphi'] = o.vz()#*u.km/u.s
sun['vT_Rzphi'] = o.vT()#*u.km/u.s
try:
sun['J_R'], sun['L_Z'],sun['J_Z'], sun['Omegar'], sun['Omegaphi'], sun['Omegaz'], angler,anglephi,anglez = aAS.actionsFreqsAngles(
#R,vR,vT,z,vz[,phi]
sun['R_Rzphi']*u.kpc,
sun['vR_Rzphi']*u.km/u.s,
sun['vT_Rzphi']*u.km/u.s,
sun['z_Rzphi']*u.kpc,
sun['vz_Rzphi']*u.km/u.s,
sun['phi_Rzphi']*u.rad,
ro=r_galactic_centre,vo=v_circular
)
except:
sun['Omegar'] = [np.nan]
sun['Omegaphi'] = [np.nan]
sun['Omegaz'] = [np.nan]
try:
sun['J_R'], sun['L_Z'],sun['J_Z'] = aAS(
#R,vR,vT,z,vz[,phi]
sun['R_Rzphi']*u.kpc,
sun['vR_Rzphi']*u.km/u.s,
sun['vT_Rzphi']*u.km/u.s,
sun['z_Rzphi']*u.kpc,
sun['vz_Rzphi']*u.km/u.s,
sun['phi_Rzphi']*u.rad,
ro=r_galactic_centre,vo=v_circular
)
except:
sun['J_R'] = [np.nan]
sun['L_Z'] = [np.nan]
sun['J_Z'] = [np.nan]
try:
sun['ecc'], sun['zmax'], sun['R_peri'], sun['R_ap'] = aAS.EccZmaxRperiRap(
#R,vR,vT,z,vz[,phi]
sun['R_Rzphi']*u.kpc,
sun['vR_Rzphi']*u.km/u.s,
sun['vT_Rzphi']*u.km/u.s,
sun['z_Rzphi']*u.kpc,
sun['vz_Rzphi']*u.km/u.s,
sun['phi_Rzphi']*u.rad,
ro=r_galactic_centre,vo=v_circular
)
sun['zmax']
sun['R_peri']
sun['R_peri']
except:
sun['ecc'] = [np.nan]
sun['zmax'] = [np.nan]
sun['R_peri'] = [np.nan]
sun['R_ap'] = [np.nan]
sun['Energy'] = o.E(pot=pot,ro=r_galactic_centre,vo=v_circular,zo=z_galactic_plane)
print('Solar values:')
print('X,Y,Z: '+"{:.2f}".format(sun['X_XYZ'])+' '+"{:.2f}".format(sun['Y_XYZ'])+' '+"{:.2f}".format(sun['Z_XYZ']))
print('U,V,W: '+"{:.2f}".format(sun['U_UVW'])+' '+"{:.2f}".format(sun['V_UVW'])+' '+"{:.2f}".format(sun['W_UVW']))
print('R,phi,z: '+"{:.2f}".format(sun['R_Rzphi'])+' '+"{:.2f}".format(sun['phi_Rzphi'])+' '+"{:.2f}".format(sun['z_Rzphi']))
print('vR,vphi,vT,vz: '+"{:.2f}".format(sun['vR_Rzphi'])+' '+"{:.2f}".format(sun['vphi_Rzphi'])+' '+"{:.2f}".format(sun['vT_Rzphi'])+' '+"{:.2f}".format(sun['vz_Rzphi']))
print('J_R,L_Z,J_Z: '+"{:.2f}".format(sun['J_R'][0])+' '+"{:.2f}".format(sun['L_Z'][0])+' '+"{:.2f}".format(sun['J_Z'][0]))
print('Omega R/phi/z: '+"{:.2f}".format(sun['Omegar'][0])+' '+"{:.2f}".format(sun['Omegaphi'][0])+' '+"{:.2f}".format(sun['Omegaz'][0]))
print('ecc, zmax, R_peri, R_apo: '+"{:.2f}".format(sun['ecc'][0])+' '+"{:.2f}".format(sun['zmax'][0])+' '+"{:.2f}".format(sun['R_peri'][0])+' '+"{:.2f}".format(sun['R_ap'][0]))
print('Energy: '+"{:.2f}".format(sun['Energy']))
# ### Input of 6D information in observable dimensions
# In[ ]:
try:
galah_gaia_input = pyfits.getdata('/shared-storage/buder/svn-repos/trunk/GALAH/GALAH_DR3/catalogs/GALAH_DR3_main_200604_extended_caution_v2.fits',1)
out_dir = '/shared-storage/buder/svn-repos/trunk/GALAH/GALAH_DR3/processing/VAC_dynamics/'
except:
try:
galah_gaia_input = pyfits.getdata('/Users/svenbuder/GALAH_DR3/catalogs/GALAH_DR3_main_200604_extended_caution_v2.fits',1)
out_dir = '/Users/svenbuder/GALAH_DR3/processing/VAC_dynamics/'
except:
galah_gaia_input = pyfits.getdata('/avatar/buder/trunk/GALAH/GALAH_DR3/catalogs/GALAH_DR3_main_200604_extended_caution_v2.fits',1)
out_dir = '/avatar/buder/trunk/GALAH/GALAH_DR3/processing/VAC_dynamics/'
full_length = len(galah_gaia_input['sobject_id'])
print("Initial nr. of entries")
print(full_length)
subset_size = 10000
try:
subset = int(sys.argv[1])
except:
subset = -1
if subset != -1:
if subset*subset_size >= full_length:
sys.exit('The subset is beyond the length of GALAH DR3')
galah_gaia_input = galah_gaia_input[subset*subset_size:np.min([(subset+1)*subset_size,full_length])]
nr_galah_stars = len(galah_gaia_input['sobject_id'])
print("Nr. stars per subset")
print(nr_galah_stars)
nr_galah_stars_dynamics = np.where(
np.isfinite(galah_gaia_input['ra']) &
np.isfinite(galah_gaia_input['dec']) &
np.isfinite(galah_gaia_input['r_est']) &
np.isfinite(galah_gaia_input['pmra']) &
np.isfinite(galah_gaia_input['pmdec']) &
#np.isfinite(galah_gaia_input['rv_guess']) &
np.isfinite(galah_gaia_input['ra_error']) &
np.isfinite(galah_gaia_input['dec_error']) &
np.isfinite(galah_gaia_input['r_hi']) &
np.isfinite(galah_gaia_input['r_lo']) &
np.isfinite(galah_gaia_input['pmra_error']) &
np.isfinite(galah_gaia_input['pmdec_error']) &
#np.isfinite(galah_gaia_input['e_rv_guess']) &
#(galah_gaia_input['rv_guess'] != 999.) &
#(galah_gaia_input['rv_guess'] != 1024.) &
(
(
np.isfinite(galah_gaia_input['rv_galah'])
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Common functions to transform image.
Code: https://github.com/fepegar/torchio
"""
# Import
import numpy as np
from scipy.spatial.transform import Rotation
from scipy.ndimage import map_coordinates
from .transform import compose
from .transform import gaussian_random_field
from .transform import affine_flow
from .utils import interval
def affine(arr, rotation=10, translation=10, zoom=0.2, order=3, dist="uniform",
seed=None):
""" Random affine transformation.
The affine translation & rotation parameters are drawn from a lognormal
distribution - small movements are assumed to occur more often and large
movements less frequently - or from a uniform distribution.
Parameters
----------
arr: array
the input data.
rotation: float or 2-tuple, default 10
the rotation in degrees of the simulated movements. Larger
values generate more distorted images.
translation: float or 2-tuple, default 10
the translation in voxel of the simulated movements. Larger
values generate more distorted images.
zoom: float, default 0.2
the zooming magnitude. Larger values generate more distorted images.
order: int, default 3
the order of the spline interpolation in the range [0, 5].
dist: str, default 'uniform'
the sampling distribution: 'uniform' or 'lognormal'.
seed: int, default None
seed to control random number generator.
Returns
-------
transformed: array
the transformed input data.
"""
rotation = interval(rotation)
translation = interval(translation)
random_rotations = random_generator(
rotation, arr.ndim, dist=dist, seed=seed)
random_translations = random_generator(
translation, arr.ndim, dist=dist, seed=seed)
np.random.seed(seed)
random_zooms = np.random.uniform(
low=(1 - zoom), high=(1 + zoom), size=arr.ndim)
random_rotations = Rotation.from_euler(
"xyz", random_rotations, degrees=True)
random_rotations = random_rotations.as_matrix()
affine = compose(random_translations, random_rotations, random_zooms)
shape = arr.shape
flow = affine_flow(affine, shape)
locs = flow.reshape(len(shape), -1)
transformed = map_coordinates(arr, locs, order=order, cval=0)
return transformed.reshape(shape)
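# Hedged usage sketch (shapes and parameter values are illustrative):
#   vol = np.random.rand(64, 64, 64)
#   out = affine(vol, rotation=5, translation=5, zoom=0.1, seed=0)
#   out.shape == (64, 64, 64)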
def flip(arr, axis=None, seed=None):
""" Apply a random mirror flip.
Parameters
----------
arr: array
the input data.
axis: int, default None
apply flip on the specified axis. If not specified, randomize the
flip axis.
seed: int, default None
seed to control random number generator.
Returns
-------
transformed: array
the transformed input data.
"""
if axis is None:
np.random.seed(seed)
axis = np.random.randint(low=0, high=arr.ndim, size=1)[0]
return np.flip(arr, axis=axis)
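# Hedged usage sketch (array values are illustrative):
#   vol = np.random.rand(32, 32, 32)
#   flip(vol, axis=0)   # deterministic mirror along the first axis
#   flip(vol, seed=42)  # axis chosen at random, reproducible via the seed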
def deformation(arr, max_displacement=4, alpha=3, order=3, seed=None):
""" Apply dense random elastic deformation.
Reference: <NAME>, <NAME>, <NAME>., Simulating Longitudinal
Brain MRIs with Known Volume Changes and Realistic Variations in Image
Intensity, Front Neurosci, 2017.
Parameters
----------
arr: array
the input data.
max_displacement: float, default 4
the maximum displacement in voxel along each dimension. Larger
values generate more distorted images.
alpha: float, default 3
the power of the power-law momentum distribution. Larger values
generate smoother fields.
order: int, default 3
the order of the spline interpolation in the range [0, 5].
seed: int, default None
seed to control random number generator.
Returns
-------
transformed: array
the transformed input data.
"""
kwargs = {"seed": seed}
flow_x = gaussian_random_field(
arr.shape[:2], alpha=alpha, normalize=True, **kwargs)
flow_x /= flow_x.max()
flow_x = np.asarray([flow_x] * arr.shape[-1]).transpose(1, 2, 0)
if seed is not None:
kwargs = {"seed": seed + 2}
flow_y = gaussian_random_field(
arr.shape[:2], alpha=alpha, normalize=True, **kwargs)
flow_y /= flow_y.max()
flow_y = np.asarray([flow_y] * arr.shape[-1])
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import axes
from scipy import stats
from scipy import interpolate
from scipy import linalg
def round_lims(values, round_factor=0.5):
"""
Identify rounded minimum and maximum based on appropriate power of 10
and round_factor.
round_place = 10 ** ceil( log10((max-min))-1 )
Minimum = (floor(min / round_place / round_factor)
* round_place * round_factor)
Maximum = (ceil(max / round_place / round_factor)
* round_place * round_factor)
E.g. [10, 39, 43] yields (10, 50) with round_factor = 1 (nearest 10)
[10, 39, 43] yields (0, 100) with round_factor = 10 (nearest 100)
[10, 39, 43] yields (10, 45) with round_factor = 0.5 (nearest 5)
Args:
values (np.ndarray, list): vector of values of interest.
round_factor (float): multiplicative factor for rounding power
(Default = 0.5).
Returns:
lims: tuple of (rounded minimum, rounded maximum)
tick_factor: tick spacing implied by the rounding, i.e. round_place * round_factor
"""
min_val = np.min(values)
max_val = np.max(values)
round_place = 10 ** np.ceil(np.log10(np.ptp([min_val, max_val])) - 1)
rounded_min = (np.floor(min_val / round_place / round_factor)
* round_place * round_factor)
rounded_max = (np.ceil(max_val / round_place / round_factor)
* round_place * round_factor)
lims = (rounded_min, rounded_max)
tick_factor = round_place * round_factor
return lims, tick_factor
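# Hedged usage sketch, matching the docstring example above:
#   lims, tick = round_lims([10, 39, 43], round_factor=0.5)
#   lims == (10.0, 45.0) and tick == 5.0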
def density_scatter(references,
predictions,
ax=None,
loglog=False,
lims=None,
lim_factor=0.5,
subset_threshold=1000,
cmap=None,
metrics=True,
text_size=10,
units=None,
labels=True,
label_size=10,
**scatter_kwargs):
"""
Plot regression performance with a scatter plot of predictions vs.
references, colored by log-density of points. Optionally display
mean-absolute error, root-mean-square error, minimum residual,
and maximum residual.
Args:
references (list, np.ndarray): Vector of Y-axis values.
predictions (list, np.ndarray): Vector of X-axis values.
ax (axes.Axes): Optional handle for existing matplotlib axis object
loglog (bool): whether to plot on a log-log scale.
lims (tuple): lower and upper bounds for axis limits.
lim_factor (float): tuning factor for automatically determining limits.
subset_threshold (int): maximum number of points to plot.
If exceeded, subset will be selected randomly.
cmap (matplotlib.colors.LinearSegmentedColormap): color map.
metrics (bool): plot text with metrics e.g. root-mean-square-error.
text_size (int): fontsize for metrics text.
units (str): units for axis labels.
labels (bool): add axis labels.
label_size (int): fontsize for axis and tick labels.
**scatter_kwargs: keyword arguments for plt.scatter function.
Returns:
fig & ax: matplotlib figure and axis.
"""
if ax is None:
fig, ax = plt.subplots()
fig_tuple = (fig, ax)
else:
fig_tuple = (None, None)
if 's' not in scatter_kwargs.keys():
scatter_kwargs['s'] = 1 # default marker size
if cmap is None:
cmap = cm.viridis
x = np.array(references)
y = np.array(predictions)
# Compute metrics, e.g. RMSE, before selecting random subset.
residuals = np.subtract(y, x)
mae = np.mean(np.abs(residuals))
rmse = np.sqrt(np.mean(residuals ** 2))
max_over = np.max(residuals)
max_under = np.min(residuals)
# Randomly select subset for large datasets
x_subset, y_subset = get_subsets(subset_threshold, x, y)
# Scatter, colored by log density
try:
x, y, z = density_estimation(x_subset, y_subset, x, y)
except linalg.LinAlgError:
z = np.ones(len(y))
ax.scatter(x, y, c=z, cmap=cmap, **scatter_kwargs)
# Axis scale and limits
ax.axis('square')
if loglog is True:
ax.set_xscale('log')
ax.set_yscale('log')
if lims is None:
lims = ax.get_xlim()
else:
if lims is None:
lims, tick_factor = round_lims(np.concatenate([x, y]))
#!/usr/bin/env python
#FIXME: Seperate the tests for mesh and general_mesh
#FIXME (Ole): Maxe this test independent of anything that inherits from General_mesh (namely shallow_water)
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import unittest
from math import sqrt
from anuga.abstract_2d_finite_volumes.neighbour_mesh import *
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_periodic
from anuga.config import epsilon
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geometry.polygon import is_inside_polygon
from anuga.utilities.numerical_tools import ensure_numeric
import numpy as num
def distance(x, y):
return sqrt(num.sum((num.array(x)-num.array(y))**2))
class Test_Mesh(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_triangle_inputs(self):
points = [[0.0, 0.0], [4.0, 0.0], [0.0, 3.0]]
vertices = [0,1,2] #Wrong
try:
mesh = Mesh(points, vertices)
except:
pass
else:
msg = 'Should have raised exception'
raise Exception(msg)
def test_basic_triangle(self):
a = [0.0, 0.0]
b = [4.0, 0.0]
c = [0.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices)
#Centroid
centroid = mesh.centroid_coordinates[0]
assert centroid[0] == 4.0/3
assert centroid[1] == 1.0
#Area
assert mesh.areas[0] == 6.0,\
'Area was %f, should have been 6.0' %mesh.areas[0]
#Normals
normals = mesh.get_normals()
assert num.allclose(normals[0, 0:2], [3.0/5, 4.0/5])
assert num.allclose(normals[0, 2:4], [-1.0, 0.0])
assert num.allclose(normals[0, 4:6], [0.0, -1.0])
assert num.allclose(mesh.get_normal(0,0), [3.0/5, 4.0/5])
assert num.allclose(mesh.get_normal(0,1), [-1.0, 0.0])
assert num.allclose(mesh.get_normal(0,2), [0.0, -1.0])
#Edge lengths
assert num.allclose(mesh.edgelengths[0], [5.0, 3.0, 4.0])
#Vertex coordinates
#V = mesh.get_vertex_coordinates()
#assert allclose(V[0], [0.0, 0.0, 4.0, 0.0, 0.0, 3.0])
V = mesh.get_vertex_coordinates()
assert num.allclose(V, [ [0.0, 0.0],
[4.0, 0.0],
[0.0, 3.0] ])
V0 = mesh.get_vertex_coordinate(0, 0)
assert num.allclose(V0, [0.0, 0.0])
V1 = mesh.get_vertex_coordinate(0, 1)
assert num.allclose(V1, [4.0, 0.0])
V2 = mesh.get_vertex_coordinate(0, 2)
assert num.allclose(V2, [0.0, 3.0])
#General tests:
#Test that points are arranged in a counter clock wise order etc
mesh.check_integrity()
#Test that the centroid is located 2/3 of the way
#from each vertex to the midpoint of the opposite side
V = mesh.get_vertex_coordinates()
x0 = V[0, 0]; y0 = V[0, 1]
x1 = V[1, 0]; y1 = V[1, 1]
x2 = V[2, 0]; y2 = V[2, 1]
#x0 = V[0,0]
#y0 = V[0,1]
#x1 = V[0,2]
#y1 = V[0,3]
#x2 = V[0,4]
#y2 = V[0,5]
m0 = [old_div((x1 + x2),2), old_div((y1 + y2),2)]
m1 = [old_div((x0 + x2),2), old_div((y0 + y2),2)]
m2 = [old_div((x1 + x0),2), old_div((y1 + y0),2)]
d0 = distance(centroid, [x0, y0])
d1 = distance(m0, [x0, y0])
assert d0 == old_div(2*d1,3)
#
d0 = distance(centroid, [x1, y1])
d1 = distance(m1, [x1, y1])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
d0 = distance(centroid, [x2, y2])
d1 = distance(m2, [x2, y2])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#Radius
d0 = distance(centroid, m0)
assert d0 == 5.0/6
d1 = distance(centroid, m1)
assert d1 == sqrt(73.0/36)
d2 = distance(centroid, m2)
assert d2 == sqrt(13.0/9)
assert mesh.radii[0] == min(d0, d1, d2)
assert mesh.radii[0] == 5.0/6
#Let x be the centroid of triangle abc.
#Test that areas of the three triangles axc, cxb, and bxa are equal.
points = [a, b, c, centroid]
vertices = [[0,3,2], [2,3,1], [1,3,0]]
new_mesh = Mesh(points, vertices)
assert new_mesh.areas[0] == new_mesh.areas[1]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == old_div(mesh.areas[0],3)
def test_general_triangle(self):
a = [2.0, 1.0]
b = [6.0, 2.0]
c = [1.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices)
centroid = mesh.centroid_coordinates[0]
#Test that the centroid is located 2/3 of the way
#from each vertex to the midpoint of the opposite side
V = mesh.get_vertex_coordinates()
x0 = V[0, 0]; y0 = V[0, 1]
x1 = V[1, 0]; y1 = V[1, 1]
x2 = V[2, 0]; y2 = V[2, 1]
#x0 = V[0,0]
#y0 = V[0,1]
#x1 = V[0,2]
#y1 = V[0,3]
#x2 = V[0,4]
#y2 = V[0,5]
m0 = [old_div((x1 + x2),2), old_div((y1 + y2),2)]
m1 = [old_div((x0 + x2),2), old_div((y0 + y2),2)]
m2 = [old_div((x1 + x0),2), old_div((y1 + y0),2)]
d0 = distance(centroid, [x0, y0])
d1 = distance(m0, [x0, y0])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#
d0 = distance(centroid, [x1, y1])
d1 = distance(m1, [x1, y1])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
d0 = distance(centroid, [x2, y2])
d1 = distance(m2, [x2, y2])
assert abs(d0 - old_div(2*d1,3)) < epsilon, '%e, %e' %(d0, old_div(2*d1,3))
#Radius
d0 = distance(centroid, m0)
d1 = distance(centroid, m1)
d2 = distance(centroid, m2)
assert mesh.radii[0] == min(d0, d1, d2)
#Let x be the centroid of triangle abc.
#Test that areas of the three triangles axc, cxb, and bxa are equal.
points = [a, b, c, centroid]
vertices = [[0,3,2], [2,3,1], [1,3,0]]
new_mesh = Mesh(points, vertices)
assert new_mesh.areas[0] == new_mesh.areas[1]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == new_mesh.areas[2]
assert new_mesh.areas[1] == old_div(mesh.areas[0],3)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
def test_inscribed_circle_equilateral(self):
"""test that the radius is calculated correctly by mesh in the case of an equilateral triangle"""
a = [0.0, 0.0]
b = [2.0, 0.0]
c = [1.0, sqrt(3.0)]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices,use_inscribed_circle=False)
assert num.allclose(mesh.radii[0],old_div(sqrt(3.0),3)),'Steve''s doesn''t work'
mesh = Mesh(points, vertices,use_inscribed_circle=True)
assert num.allclose(mesh.radii[0],old_div(sqrt(3.0),3)),'inscribed circle doesn''t work'
def test_inscribed_circle_rightangle_triangle(self):
"""test that the radius is calculated correctly by mesh in the case of a right-angled triangle"""
a = [0.0, 0.0]
b = [4.0, 0.0]
c = [0.0, 3.0]
points = [a, b, c]
vertices = [[0,1,2]]
mesh = Mesh(points, vertices,use_inscribed_circle=False)
assert num.allclose(mesh.radii[0],5.0/6),'Steve''s doesn''t work'
mesh = Mesh(points, vertices,use_inscribed_circle=True)
assert num.allclose(mesh.radii[0],1.0),'inscribed circle doesn''t work'
def test_two_triangles(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0,0.0]
e = [2.0, 2.0]
points = [a, b, c, e]
vertices = [ [1,0,2], [1,2,3] ] #bac, bce
mesh = Mesh(points, vertices)
assert mesh.areas[0] == 2.0
assert num.allclose(mesh.centroid_coordinates[0], [2.0/3, 2.0/3])
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
def test_more_triangles(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0, 0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0, 0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe, daf, dae
vertices = [ [1,0,2], [1,2,4], [4,2,5], [3,1,4]]
mesh = Mesh(points, vertices)
#Test that points are arranged in a counter clock wise order
mesh.check_integrity()
assert mesh.areas[0] == 2.0
assert mesh.areas[1] == 2.0
assert mesh.areas[2] == 2.0
assert mesh.areas[3] == 2.0
assert mesh.edgelengths[1,0] == 2.0
assert mesh.edgelengths[1,1] == 2.0
assert mesh.edgelengths[1,2] == sqrt(8.0)
assert num.allclose(mesh.centroid_coordinates[0], [2.0/3, 2.0/3])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slater_determinants.py."""
from __future__ import absolute_import
import numpy
import unittest
from scipy.linalg import qr
from openfermion.utils import (fermionic_gaussian_decomposition,
givens_decomposition)
from openfermion.utils._slater_determinants import (
diagonalizing_fermionic_unitary, double_givens_rotate, givens_rotate,
swap_rows)
class GivensDecompositionTest(unittest.TestCase):
def test_bad_dimensions(self):
m, n = (3, 2)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(m, m)
y = numpy.random.randn(m, m)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :n]
with self.assertRaises(ValueError):
V, givens_rotations, diagonal = givens_decomposition(Q)
def test_identity(self):
n = 3
Q = numpy.eye(n, dtype=complex)
V, givens_rotations, diagonal = givens_decomposition(Q)
# V should be the identity
I = numpy.eye(n, dtype=complex)
for i in range(n):
for j in range(n):
self.assertAlmostEqual(V[i, j], I[i, j])
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# The diagonal should be ones
for d in diagonal:
self.assertAlmostEqual(d, 1.)
def test_antidiagonal(self):
m, n = (3, 3)
Q = numpy.zeros((m, n), dtype=complex)
Q[0, 2] = 1.
Q[1, 1] = 1.
Q[2, 0] = 1.
V, givens_rotations, diagonal = givens_decomposition(Q)
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# VQ should equal the diagonal
VQ = V.dot(Q)
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
for i in range(n):
for j in range(n):
self.assertAlmostEqual(VQ[i, j], D[i, j])
def test_3_by_3(self):
m, n = (3, 3)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# Compute V * Q * U^\dagger
W = V.dot(Q)
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
# Assert that W and D are the same
for i in range(m):
for j in range(n):
self.assertAlmostEqual(D[i, j], W[i, j])
def test_3_by_4(self):
m, n = (3, 4)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# Compute U
U = numpy.eye(n, dtype=complex)
for parallel_set in givens_rotations:
combined_givens = numpy.eye(n, dtype=complex)
for i, j, theta, phi in parallel_set:
c = numpy.cos(theta)
s = numpy.sin(theta)
phase = numpy.exp(1.j * phi)
G = numpy.array([[c, -phase * s],
[s, phase * c]], dtype=complex)
givens_rotate(combined_givens, G, i, j)
U = combined_givens.dot(U)
# Compute V * Q * U^\dagger
W = V.dot(Q.dot(U.T.conj()))
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
# Assert that W and D are the same
for i in range(m):
for j in range(n):
self.assertAlmostEqual(D[i, j], W[i, j])
def test_3_by_5(self):
m, n = (3, 5)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# Compute U
U = numpy.eye(n, dtype=complex)
for parallel_set in givens_rotations:
combined_givens = numpy.eye(n, dtype=complex)
for i, j, theta, phi in parallel_set:
c = numpy.cos(theta)
s = numpy.sin(theta)
phase = numpy.exp(1.j * phi)
G = numpy.array([[c, -phase * s],
[s, phase * c]], dtype=complex)
givens_rotate(combined_givens, G, i, j)
U = combined_givens.dot(U)
# Compute V * Q * U^\dagger
W = V.dot(Q.dot(U.T.conj()))
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
# Assert that W and D are the same
for i in range(m):
for j in range(n):
self.assertAlmostEqual(D[i, j], W[i, j])
def test_3_by_6(self):
m, n = (3, 6)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# Compute U
U = numpy.eye(n, dtype=complex)
for parallel_set in givens_rotations:
combined_givens = numpy.eye(n, dtype=complex)
for i, j, theta, phi in parallel_set:
c = numpy.cos(theta)
s = numpy.sin(theta)
phase = numpy.exp(1.j * phi)
G = numpy.array([[c, -phase * s],
[s, phase * c]], dtype=complex)
givens_rotate(combined_givens, G, i, j)
U = combined_givens.dot(U)
# Compute V * Q * U^\dagger
W = V.dot(Q.dot(U.T.conj()))
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
# Assert that W and D are the same
for i in range(m):
for j in range(n):
self.assertAlmostEqual(D[i, j], W[i, j])
def test_3_by_7(self):
m, n = (3, 7)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# Compute U
U = numpy.eye(n, dtype=complex)
for parallel_set in givens_rotations:
combined_givens = numpy.eye(n, dtype=complex)
for i, j, theta, phi in parallel_set:
c = numpy.cos(theta)
s = numpy.sin(theta)
phase = numpy.exp(1.j * phi)
G = numpy.array([[c, -phase * s],
[s, phase * c]], dtype=complex)
givens_rotate(combined_givens, G, i, j)
U = combined_givens.dot(U)
# Compute V * Q * U^\dagger
W = V.dot(Q.dot(U.T.conj()))
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
# Assert that W and D are the same
for i in range(m):
for j in range(n):
self.assertAlmostEqual(D[i, j], W[i, j])
def test_3_by_8(self):
m, n = (3, 8)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# Compute U
U = numpy.eye(n, dtype=complex)
for parallel_set in givens_rotations:
combined_givens = numpy.eye(n, dtype=complex)
for i, j, theta, phi in parallel_set:
c = numpy.cos(theta)
                s = numpy.sin(theta)
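                # NOTE: the remainder of this test was cut off in the source.
                # The lines below are a reconstruction that simply mirrors the
                # identical pattern used in test_3_by_4 through test_3_by_7 above.
                G = numpy.array([[c, -phase * s],
                                 [s, phase * c]], dtype=complex)
                givens_rotate(combined_givens, G, i, j)
            U = combined_givens.dot(U)
        # Compute V * Q * U^\dagger
        W = V.dot(Q.dot(U.T.conj()))
        # Construct the diagonal matrix
        D = numpy.zeros((m, n), dtype=complex)
        D[numpy.diag_indices(m)] = diagonal
        # Assert that W and D are the same
        for i in range(m):
            for j in range(n):
                self.assertAlmostEqual(D[i, j], W[i, j])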
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
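    # Start every (write_engine, read_engine) pair with the skip marks for both
    # engines; custom xfail/skip marks from **kwargs are appended below.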
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
        # attempt to load as category a column that is not stored as such
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
        # attempt to load an unknown column as category
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
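    # "in" keeps any partition whose min/max statistics admit one of the listed
    # values; with 5 partitions of 2 rows each, only the first and last qualify.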
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_filters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that the first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
def check_compression(engine, filename, compression):
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
out = dd.read_parquet(fn, engine=engine)
assert_eq(out, ddf)
check_compression(engine, fn, compression)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=["x"])
check_compression(engine, fn, compression)
@pytest.fixture(
params=[
# fastparquet 0.1.3
{
"columns": [
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.7.1
{
"columns": [
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.8.0
{
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
},
# TODO: fastparquet update
]
)
def pandas_metadata(request):
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(
pandas_metadata
)
assert index_names == ["idx"]
assert column_names == ["A"]
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata["index_columns"] == ["__index_level_0__"]:
assert mapping == {"__index_level_0__": "idx", "A": "A"}
else:
assert mapping == {"idx": "idx", "A": "A"}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ["x"]
e_mapping = {"__index_level_0__": None, "x": "x"}
e_column_index_names = [None]
md = {
"columns": [
{
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "__index_level_0__",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
# pyarrow 0.8.0 None for index
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "x",
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": None,
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
@PYARROW_MARK
def test_read_no_metadata(tmpdir, engine):
# use pyarrow.parquet to create a parquet file without
# pandas metadata
tmp = str(tmpdir) + "table.parq"
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=["A", "B"]
)
pq.write_table(table, tmp)
result = dd.read_parquet(tmp, engine=engine)
expected = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
assert_eq(result, expected)
def test_parse_pandas_metadata_duplicate_index_columns():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_parse_pandas_metadata_column_with_index_name():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_writing_parquet_with_kwargs(tmpdir, engine):
fn = str(tmpdir)
path1 = os.path.join(fn, "normal")
path2 = os.path.join(fn, "partitioned")
pytest.importorskip("snappy")
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
engine_kwargs = {
"pyarrow-dataset": {
"compression": "snappy",
"coerce_timestamps": None,
"use_dictionary": True,
},
"fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
}
engine_kwargs["pyarrow-legacy"] = engine_kwargs["pyarrow-dataset"]
ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
out = dd.read_parquet(path1, engine=engine)
assert_eq(out, ddf, check_index=(engine != "fastparquet"))
# Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
with dask.config.set(scheduler="sync"):
ddf.to_parquet(
path2, engine=engine, partition_on=["a"], **engine_kwargs[engine]
)
out = dd.read_parquet(path2, engine=engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):
fn = str(tmpdir)
with pytest.raises(TypeError):
ddf.to_parquet(fn, engine=engine, unknown_key="unknown_value")
@ANY_ENGINE_MARK
def test_to_parquet_with_get(tmpdir):
from dask.multiprocessing import get as mp_get
tmpdir = str(tmpdir)
flag = [False]
def my_get(*args, **kwargs):
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, compute_kwargs={"scheduler": my_get})
assert flag[0]
result = dd.read_parquet(os.path.join(tmpdir, "*"))
assert_eq(result, df, check_index=False)
def test_select_partitioned_column(tmpdir, engine):
pytest.importorskip("snappy")
fn = str(tmpdir)
size = 20
d = {
"signal1": np.random.normal(0, 0.3, size=size).cumsum() + 50,
"fake_categorical1": np.random.choice(["A", "B", "C"], size=size),
"fake_categorical2": np.random.choice(["D", "E", "F"], size=size),
}
df = dd.from_pandas(pd.DataFrame(d), 2)
df.to_parquet(
fn,
compression="snappy",
write_index=False,
engine=engine,
partition_on=["fake_categorical1", "fake_categorical2"],
)
df_partitioned = dd.read_parquet(fn, engine=engine)
df_partitioned[df_partitioned.fake_categorical1 == "A"].compute()
def test_with_tz(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version < parse_version("0.3.0"):
pytest.skip("fastparquet<0.3.0 did not support this")
with warnings.catch_warnings():
if engine == "fastparquet":
# fastparquet-442
warnings.simplefilter("ignore", FutureWarning) # pandas 0.25
fn = str(tmpdir)
df = pd.DataFrame([[0]], columns=["a"], dtype="datetime64[ns, UTC]")
df = dd.from_pandas(df, 1)
df.to_parquet(fn, engine=engine)
df2 = dd.read_parquet(fn, engine=engine)
assert_eq(df, df2, check_divisions=False, check_index=False)
@PYARROW_MARK
def test_arrow_partitioning(tmpdir):
# Issue #3518
path = str(tmpdir)
data = {
"p": np.repeat(np.arange(3), 2).astype(np.int8),
"b": np.repeat(-1, 6).astype(np.int16),
"c": np.repeat(-2, 6).astype(np.float32),
"d": np.repeat(-3, 6).astype(np.float64),
}
pdf = pd.DataFrame(data)
ddf = dd.from_pandas(pdf, npartitions=2)
ddf.to_parquet(path, engine="pyarrow", write_index=False, partition_on="p")
ddf = dd.read_parquet(path, index=False, engine="pyarrow")
ddf.astype({"b": np.float32}).compute()
def test_informative_error_messages():
with pytest.raises(ValueError) as info:
dd.read_parquet("foo", engine="foo")
assert "foo" in str(info.value)
assert "arrow" in str(info.value)
assert "fastparquet" in str(info.value)
def test_append_cat_fp(tmpdir, engine):
path = str(tmpdir)
# https://github.com/dask/dask/issues/4120
df = pd.DataFrame({"x": ["a", "a", "b", "a", "b"]})
df["x"] = df["x"].astype("category")
ddf = dd.from_pandas(df, npartitions=1)
dd.to_parquet(ddf, path, engine=engine)
dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)
d = dd.read_parquet(path, engine=engine).compute()
assert d["x"].tolist() == ["a", "a", "b", "a", "b"] * 2
@PYARROW_MARK
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]}),
pd.DataFrame({"x": ["c", "a", "b"]}),
pd.DataFrame({"x": ["cc", "a", "bbb"]}),
pd.DataFrame({"x": [b"a", b"b", b"c"]}),
pytest.param(pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])})),
pytest.param(pd.DataFrame({"x": pd.Categorical([1, 2, 1])})),
pd.DataFrame({"x": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms
pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us
pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"),
# pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"),
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"),
pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"),
pd.DataFrame({"x": [3, 2, 1]}).astype("float32"),
pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]),
pd.DataFrame(
{"x": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name="foo")
),
pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}),
pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]),
pd.DataFrame({"0": [3, 2, 1]}),
pd.DataFrame({"x": [3, 2, None]}),
pd.DataFrame({"-": [3.0, 2.0, None]}),
pd.DataFrame({".": [3.0, 2.0, None]}),
pd.DataFrame({" ": [3.0, 2.0, None]}),
],
)
def test_roundtrip_arrow(tmpdir, df):
# Index will be given a name when preserved as index
tmp_path = str(tmpdir)
if not df.index.name:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp_path, engine="pyarrow", write_index=True)
ddf2 = dd.read_parquet(tmp_path, engine="pyarrow", gather_statistics=True)
assert_eq(ddf, ddf2)
def test_datasets_timeseries(tmpdir, engine):
tmp_path = str(tmpdir)
df = dask.datasets.timeseries(
start="2000-01-01", end="2000-01-10", freq="1d"
).persist()
df.to_parquet(tmp_path, engine=engine)
df2 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, df2)
def test_pathlib_path(tmpdir, engine):
import pathlib
df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
path = pathlib.Path(str(tmpdir))
ddf.to_parquet(path, engine=engine)
ddf2 = dd.read_parquet(path, engine=engine)
assert_eq(ddf, ddf2)
@PYARROW_LE_MARK
def test_pyarrow_metadata_nthreads(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmp_path, engine="pyarrow")
ops = {"dataset": {"metadata_nthreads": 2}}
ddf2 = dd.read_parquet(tmp_path, engine="pyarrow-legacy", **ops)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_categories_large(tmpdir, engine):
# Issue #5112
fn = str(tmpdir.join("parquet_int16.parq"))
numbers = np.random.randint(0, 800000, size=1000000)
df = pd.DataFrame(numbers.T, columns=["name"])
df.name = df.name.astype("category")
df.to_parquet(fn, engine="fastparquet", compression="uncompressed")
ddf = dd.read_parquet(fn, engine=engine, categories={"name": 80000})
assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))
@write_read_engines()
def test_read_glob_no_meta(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"), engine=read_engine, gather_statistics=False
)
assert_eq(ddf, ddf2, check_divisions=False)
@write_read_engines()
def test_read_glob_yes_meta(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
paths = glob.glob(os.path.join(tmp_path, "*.parquet"))
paths.append(os.path.join(tmp_path, "_metadata"))
ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)
assert_eq(ddf, ddf2, check_divisions=False)
@pytest.mark.parametrize("statistics", [True, False, None])
@pytest.mark.parametrize("remove_common", [True, False])
@write_read_engines()
def test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
if remove_common and os.path.exists(os.path.join(tmp_path, "_common_metadata")):
os.unlink(os.path.join(tmp_path, "_common_metadata"))
ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)
assert_eq(ddf, ddf2, check_divisions=False)
assert ddf.divisions == tuple(range(0, 420, 30))
if statistics is False or statistics is None and read_engine.startswith("pyarrow"):
assert ddf2.divisions == (None,) * 14
else:
assert ddf2.divisions == tuple(range(0, 420, 30))
@write_read_engines()
def test_statistics_nometa(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine, write_metadata_file=False)
ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=True)
assert_eq(ddf, ddf2)
assert ddf.divisions == tuple(range(0, 420, 30))
assert ddf2.divisions == tuple(range(0, 420, 30))
@pytest.mark.parametrize("schema", ["infer", None])
def test_timeseries_nulls_in_schema(tmpdir, engine, schema):
# GH#5608: relative path failing _metadata/_common_metadata detection.
tmp_path = str(tmpdir.mkdir("files"))
tmp_path = os.path.join(tmp_path, "../", "files")
ddf2 = (
dask.datasets.timeseries(start="2000-01-01", end="2000-01-03", freq="1h")
.reset_index()
.map_partitions(lambda x: x.loc[:5])
)
ddf2 = ddf2.set_index("x").reset_index().persist()
ddf2.name = ddf2.name.where(ddf2.timestamp == "2000-01-01", None)
# Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write
dataset = {"validate_schema": False} if engine == "pyarrow-legacy" else {}
ddf2.to_parquet(tmp_path, engine=engine, write_metadata_file=False, schema=schema)
ddf_read = dd.read_parquet(tmp_path, engine=engine, dataset=dataset)
assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)
@PYARROW_LE_MARK
@pytest.mark.parametrize("numerical", [True, False])
@pytest.mark.parametrize(
"timestamp", ["2000-01-01", "2000-01-02", "2000-01-03", "2000-01-04"]
)
def test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):
tmp_path = str(tmpdir)
ddf2 = dd.from_pandas(
pd.DataFrame(
{
"timestamp": [
pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03"),
pd.Timestamp("2000-01-04"),
],
"id": np.arange(4, dtype="float64"),
"name": ["cat", "dog", "bird", "cow"],
}
),
npartitions=2,
).persist()
if numerical:
ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)
ddf2.id = ddf2.id.astype("float64")
else:
ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)
# There should be no schema error if you specify a schema on write
schema = pa.schema(
[("timestamp", pa.timestamp("ns")), ("id", pa.float64()), ("name", pa.string())]
)
ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine="pyarrow")
assert_eq(
dd.read_parquet(
tmp_path,
dataset={"validate_schema": True},
index=False,
engine="pyarrow-legacy",
),
ddf2,
check_divisions=False,
check_index=False,
)
@PYARROW_LE_MARK
def test_read_inconsistent_schema_pyarrow(tmpdir):
# Note: This is a proxy test for a cudf-related issue fix
# (see cudf#5062 github issue). The cause of that issue is
# schema inconsistencies that do not actually correspond to
# different types, but whether or not the file/column contains
# null values.
df1 = pd.DataFrame({"id": [0, 1], "val": [10, 20]})
df2 = pd.DataFrame({"id": [2, 3], "val": [30, 40]})
desired_type = "int64"
other_type = "int32"
df1.val = df1.val.astype(desired_type)
df2.val = df2.val.astype(other_type)
df_expect = pd.concat([df1, df2], ignore_index=True)
df_expect["val"] = df_expect.val.astype(desired_type)
df1.to_parquet(os.path.join(tmpdir, "0.parquet"), engine="pyarrow")
df2.to_parquet(os.path.join(tmpdir, "1.parquet"), engine="pyarrow")
# Read Directory
check = dd.read_parquet(
str(tmpdir), dataset={"validate_schema": False}, engine="pyarrow-legacy"
)
assert_eq(check.compute(), df_expect, check_index=False)
# Read List
check = dd.read_parquet(
os.path.join(tmpdir, "*.parquet"),
dataset={"validate_schema": False},
engine="pyarrow-legacy",
)
assert_eq(check.compute(), df_expect, check_index=False)
def test_graph_size_pyarrow(tmpdir, engine):
import pickle
fn = str(tmpdir)
ddf1 = dask.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="60S", partition_freq="1H"
)
ddf1.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert len(pickle.dumps(ddf2.__dask_graph__())) < 25000
@pytest.mark.parametrize("preserve_index", [True, False])
@pytest.mark.parametrize("index", [None, np.random.permutation(2000)])
def test_getitem_optimization(tmpdir, engine, preserve_index, index):
tmp_path_rd = str(tmpdir.mkdir("read"))
tmp_path_wt = str(tmpdir.mkdir("write"))
df = pd.DataFrame(
{"A": [1, 2] * 1000, "B": [3, 4] * 1000, "C": [5, 6] * 1000}, index=index
)
df.index.name = "my_index"
ddf = dd.from_pandas(df, 2, sort=False)
ddf.to_parquet(tmp_path_rd, engine=engine, write_index=preserve_index)
ddf = dd.read_parquet(tmp_path_rd, engine=engine)["B"]
# Write ddf back to disk to check that the round trip
# preserves the getitem optimization
out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)
dsk = optimize_dataframe_getitem(out.dask, keys=[out.key])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert subgraph_rd.columns == ["B"]
assert next(iter(subgraph_rd.dsk.values()))[0].columns == ["B"]
subgraph_wt = hlg_layer(dsk, "to-parquet")
assert isinstance(subgraph_wt, Blockwise)
assert_eq(ddf.compute(optimize_graph=False), ddf.compute())
def test_getitem_optimization_empty(tmpdir, engine):
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
fn = os.path.join(str(tmpdir))
ddf.to_parquet(fn, engine=engine)
df2 = dd.read_parquet(fn, columns=[], engine=engine)
dsk = optimize_dataframe_getitem(df2.dask, keys=[df2._name])
subgraph = next(iter(dsk.layers.values()))
assert isinstance(subgraph, DataFrameIOLayer)
assert subgraph.columns == []
def test_getitem_optimization_multi(tmpdir, engine):
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
fn = os.path.join(str(tmpdir))
ddf.to_parquet(fn, engine=engine)
a = dd.read_parquet(fn, engine=engine)["B"]
b = dd.read_parquet(fn, engine=engine)[["C"]]
c = dd.read_parquet(fn, engine=engine)[["C", "A"]]
a1, a2, a3 = dask.compute(a, b, c)
b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)
assert_eq(a1, b1)
assert_eq(a2, b2)
assert_eq(a3, b3)
def test_getitem_optimization_after_filter(tmpdir, engine):
df = pd.DataFrame({"a": [1, 2, 3] * 5, "b": range(15), "c": range(15)})
dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)
ddf = dd.read_parquet(tmpdir, engine=engine)
df2 = df[df["b"] > 10][["a"]]
ddf2 = ddf[ddf["b"] > 10][["a"]]
dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert set(subgraph_rd.columns) == {"a", "b"}
assert_eq(df2, ddf2)
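# Note on the column-projection assertion above: the read layer has to load
# both the selected column "a" and column "b", because "b" is still needed to
# evaluate the ddf["b"] > 10 filter before the projection drops it.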
def test_getitem_optimization_after_filter_complex(tmpdir, engine):
df = pd.DataFrame({"a": [1, 2, 3] * 5, "b": range(15), "c": range(15)})
dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)
ddf = dd.read_parquet(tmpdir, engine=engine)
df2 = df[["b"]]
df2 = df2.assign(d=1)
df2 = df[df2["d"] == 1][["b"]]
ddf2 = ddf[["b"]]
ddf2 = ddf2.assign(d=1)
ddf2 = ddf[ddf2["d"] == 1][["b"]]
dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])
subgraph_rd = hlg_layer(dsk, "read-parquet")
assert isinstance(subgraph_rd, DataFrameIOLayer)
assert set(subgraph_rd.columns) == {"b"}
assert_eq(df2, ddf2)
def test_layer_creation_info(tmpdir, engine):
df = pd.DataFrame({"a": range(10), "b": ["cat", "dog"] * 5})
dd.from_pandas(df, npartitions=1).to_parquet(
tmpdir, engine=engine, partition_on=["b"]
)
# Apply filters directly in dd.read_parquet
filters = [("b", "==", "cat")]
ddf1 = dd.read_parquet(tmpdir, engine=engine, filters=filters)
assert "dog" not in ddf1["b"].compute()
# Results will not match if we use dd.read_parquet
# without filters
ddf2 = dd.read_parquet(tmpdir, engine=engine)
with pytest.raises(AssertionError):
assert_eq(ddf1, ddf2)
# However, we can use `creation_info` to regenerate
# the same collection with `filters` defined
info = ddf2.dask.layers[ddf2._name].creation_info
kwargs = info.get("kwargs", {})
kwargs["filters"] = filters
ddf3 = info["func"](*info.get("args", []), **kwargs)
assert_eq(ddf1, ddf3)
@ANY_ENGINE_MARK
def test_blockwise_parquet_annotations(tmpdir):
df = pd.DataFrame({"a": np.arange(40, dtype=np.int32)})
expect = dd.from_pandas(df, npartitions=2)
expect.to_parquet(str(tmpdir))
with dask.annotate(foo="bar"):
ddf = dd.read_parquet(str(tmpdir))
# `ddf` should now have ONE Blockwise layer
layers = ddf.__dask_graph__().layers
assert len(layers) == 1
layer = next(iter(layers.values()))
assert isinstance(layer, DataFrameIOLayer)
assert layer.annotations == {"foo": "bar"}
@ANY_ENGINE_MARK
def test_optimize_blockwise_parquet(tmpdir):
size = 40
npartitions = 2
tmp = str(tmpdir)
df = pd.DataFrame({"a": np.arange(size, dtype=np.int32)})
expect = dd.from_pandas(df, npartitions=npartitions)
expect.to_parquet(tmp)
ddf = dd.read_parquet(tmp)
# `ddf` should now have ONE Blockwise layer
layers = ddf.__dask_graph__().layers
assert len(layers) == 1
assert isinstance(list(layers.values())[0], Blockwise)
# Check single-layer result
assert_eq(ddf, expect)
# Increment by 1
ddf += 1
expect += 1
# Increment by 10
ddf += 10
expect += 10
# `ddf` should now have THREE Blockwise layers
layers = ddf.__dask_graph__().layers
assert len(layers) == 3
assert all(isinstance(layer, Blockwise) for layer in layers.values())
# Check that `optimize_blockwise` fuses all three
# `Blockwise` layers together into a single `Blockwise` layer
keys = [(ddf._name, i) for i in range(npartitions)]
graph = optimize_blockwise(ddf.__dask_graph__(), keys)
layers = graph.layers
name = list(layers.keys())[0]
assert len(layers) == 1
assert isinstance(layers[name], Blockwise)
# Check final result
assert_eq(ddf, expect)
@PYARROW_MARK
def test_split_row_groups(tmpdir, engine):
"""Test split_row_groups read_parquet kwarg"""
tmp = str(tmpdir)
df = pd.DataFrame(
{"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
)
df.index.name = "index"
half = len(df) // 2
dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(
tmp, engine="pyarrow", row_group_size=100
)
ddf3 = dd.read_parquet(tmp, engine=engine, split_row_groups=True, chunksize=1)
assert ddf3.npartitions == 4
ddf3 = dd.read_parquet(
tmp, engine=engine, gather_statistics=True, split_row_groups=False
)
assert ddf3.npartitions == 2
dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=50
)
ddf3 = dd.read_parquet(
tmp,
engine=engine,
gather_statistics=True,
split_row_groups=True,
chunksize=1,
)
assert ddf3.npartitions == 12
ddf3 = dd.read_parquet(
tmp, engine=engine, gather_statistics=True, split_row_groups=False
)
assert ddf3.npartitions == 4
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [1, 12])
@pytest.mark.parametrize("gather_statistics", [True, False])
def test_split_row_groups_int(tmpdir, split_row_groups, gather_statistics, engine):
tmp = str(tmpdir)
row_group_size = 10
npartitions = 4
half_size = 400
df = pd.DataFrame(
{
"i32": np.arange(2 * half_size, dtype=np.int32),
"f": np.arange(2 * half_size, dtype=np.float64),
}
)
half = len(df) // 2
dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(
tmp, engine="pyarrow", row_group_size=row_group_size
)
dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=row_group_size
)
ddf2 = dd.read_parquet(
tmp,
engine=engine,
split_row_groups=split_row_groups,
gather_statistics=gather_statistics,
)
expected_rg_count = int(half_size / row_group_size)
assert ddf2.npartitions == 2 * math.ceil(expected_rg_count / split_row_groups)
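# Worked example of the partition count above (numbers taken from this test):
# half_size=400 and row_group_size=10 give 400/10 = 40 row-groups per appended
# write. With split_row_groups=12 that is ceil(40/12) = 4 partitions per write,
# i.e. 2 * 4 = 8 partitions in total; with split_row_groups=1 it is 2 * 40 = 80.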
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [8, 25])
def test_split_row_groups_int_aggregate_files(tmpdir, engine, split_row_groups):
# Use pyarrow to write a multi-file dataset with
# multiple row-groups per file
row_group_size = 10
size = 800
df = pd.DataFrame(
{
"i32": np.arange(size, dtype=np.int32),
"f": np.arange(size, dtype=np.float64),
}
)
dd.from_pandas(df, npartitions=4).to_parquet(
str(tmpdir), engine="pyarrow", row_group_size=row_group_size, write_index=False
)
# Read back with both `split_row_groups>1` and
# `aggregate_files=True`
ddf2 = dd.read_parquet(
str(tmpdir),
engine=engine,
split_row_groups=split_row_groups,
aggregate_files=True,
)
# Check that we are aggregating files as expected
npartitions_expected = math.ceil((size / row_group_size) / split_row_groups)
assert ddf2.npartitions == npartitions_expected
assert len(ddf2) == size
assert_eq(df, ddf2, check_index=False)
@PYARROW_MARK
def test_split_row_groups_filter(tmpdir, engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
)
df.index.name = "index"
search_val = 600
filters = [("f", "==", search_val)]
dd.from_pandas(df, npartitions=4).to_parquet(
tmp, append=True, engine="pyarrow", row_group_size=50
)
ddf2 = dd.read_parquet(tmp, engine=engine)
ddf3 = dd.read_parquet(
tmp,
engine=engine,
gather_statistics=True,
split_row_groups=True,
filters=filters,
)
assert (ddf3["i32"] == search_val).any().compute()
assert_eq(
ddf2[ddf2["i32"] == search_val].compute(),
ddf3[ddf3["i32"] == search_val].compute(),
)
@ANY_ENGINE_MARK
def test_optimize_getitem_and_nonblockwise(tmpdir):
path = os.path.join(tmpdir, "path.parquet")
df = pd.DataFrame(
{"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
index=["a", "b", "c"],
)
df.to_parquet(path)
df2 = dd.read_parquet(path)
df2[["a", "b"]].rolling(3).max().compute()
@ANY_ENGINE_MARK
def test_optimize_and_not(tmpdir):
path = os.path.join(tmpdir, "path.parquet")
df = pd.DataFrame(
{"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
index=["a", "b", "c"],
)
df.to_parquet(path)
df2 = dd.read_parquet(path)
df2a = df2["a"].groupby(df2["c"]).first().to_delayed()
df2b = df2["b"].groupby(df2["c"]).first().to_delayed()
df2c = df2[["a", "b"]].rolling(2).max().to_delayed()
df2d = df2.rolling(2).max().to_delayed()
(result,) = dask.compute(df2a + df2b + df2c + df2d)
expected = [
dask.compute(df2a)[0][0],
dask.compute(df2b)[0][0],
dask.compute(df2c)[0][0],
dask.compute(df2d)[0][0],
]
for a, b in zip(result, expected):
assert_eq(a, b)
@write_read_engines()
def test_chunksize_empty(tmpdir, write_engine, read_engine):
df = pd.DataFrame({"a": pd.Series(dtype="int"), "b": pd.Series(dtype="float")})
ddf1 = dd.from_pandas(df, npartitions=1)
ddf1.to_parquet(tmpdir, engine=write_engine)
ddf2 = dd.read_parquet(tmpdir, engine=read_engine, chunksize="1MiB")
assert_eq(ddf1, ddf2, check_index=False)
@PYARROW_MARK
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("partition_on", [None, "a"])
@pytest.mark.parametrize("chunksize", [4096, "1MiB"])
@write_read_engines()
def test_chunksize_files(
tmpdir, chunksize, partition_on, write_engine, read_engine, metadata
):
if partition_on and read_engine == "fastparquet" and not metadata:
pytest.skip("Fastparquet requires _metadata for partitioned data.")
df_size = 100
df1 = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
}
)
ddf1 = dd.from_pandas(df1, npartitions=9)
ddf1.to_parquet(
str(tmpdir),
engine=write_engine,
partition_on=partition_on,
write_metadata_file=metadata,
write_index=False,
)
ddf2 = dd.read_parquet(
str(tmpdir),
engine=read_engine,
chunksize=chunksize,
aggregate_files=partition_on if partition_on else True,
)
# Check that files were aggregated as expected
if chunksize == 4096:
assert ddf2.npartitions < ddf1.npartitions
elif chunksize == "1MiB":
if partition_on:
assert ddf2.npartitions == 3
else:
assert ddf2.npartitions == 1
# Check that the final data is correct
if partition_on:
df2 = ddf2.compute().sort_values(["b", "c"])
df1 = df1.sort_values(["b", "c"])
assert_eq(df1[["b", "c"]], df2[["b", "c"]], check_index=False)
else:
assert_eq(ddf1, ddf2, check_divisions=False, check_index=False)
@write_read_engines()
@pytest.mark.parametrize("aggregate_files", ["a", "b"])
def test_chunksize_aggregate_files(tmpdir, write_engine, read_engine, aggregate_files):
chunksize = "1MiB"
partition_on = ["a", "b"]
df_size = 100
df1 = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.choice(["small", "large"], size=df_size),
"c": np.random.random(size=df_size),
"d": np.random.randint(1, 100, size=df_size),
}
)
ddf1 = dd.from_pandas(df1, npartitions=9)
ddf1.to_parquet(
str(tmpdir),
engine=write_engine,
partition_on=partition_on,
write_index=False,
)
ddf2 = dd.read_parquet(
str(tmpdir),
engine=read_engine,
chunksize=chunksize,
aggregate_files=aggregate_files,
)
# Check that files were aggregated as expected
if aggregate_files == "a":
assert ddf2.npartitions == 3
elif aggregate_files == "b":
assert ddf2.npartitions == 6
# Check that the final data is correct
df2 = ddf2.compute().sort_values(["c", "d"])
df1 = df1.sort_values(["c", "d"])
assert_eq(df1[["c", "d"]], df2[["c", "d"]], check_index=False)
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("chunksize", [None, 1024, 4096, "1MiB"])
def test_chunksize(tmpdir, chunksize, engine, metadata):
nparts = 2
df_size = 100
row_group_size = 5
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": | np.random.randint(1, 5, size=df_size) | numpy.random.randint |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 21:02:07 2021
@author: lukepinkel
"""
import patsy
import numpy as np
import scipy as sp
import scipy.stats
import pandas as pd
import scipy.interpolate
import matplotlib.pyplot as plt
from .smooth_setup import parse_smooths, get_parametric_formula, get_smooth
from ..pyglm.families import Gaussian
from ..utilities.splines import (crspline_basis, bspline_basis, ccspline_basis,
absorb_constraints)
class GaussianAdditiveModel:
def __init__(self, formula, data):
family = Gaussian()
smooth_info = parse_smooths(formula, data)
formula = get_parametric_formula(formula)
y, Xp = patsy.dmatrices(formula, data, return_type='dataframe',
eval_env=1)
varnames = Xp.columns.tolist()
smooths = {}
start = p = Xp.shape[1]
ns = 0
for key, val in smooth_info.items():
slist = get_smooth(**val)
if len(slist)==1:
smooths[key], = slist
p_i = smooths[key]['X'].shape[1]
varnames += [f"{key}{j}" for j in range(1, p_i+1)]
p += p_i
ns += 1
else:
for i, x in enumerate(slist):
by_key = f"{key}_{x['by_cat']}"
smooths[by_key] = x
p_i = x['X'].shape[1]
varnames += [f"{by_key}_{j}" for j in range(1, p_i+1)]
p += p_i
ns += 1
X, S, Sj, ranks, ldS = [Xp], np.zeros((ns, p, p)), [], [], []
for i, (var, s) in enumerate(smooths.items()):
p_i = s['X'].shape[1]
Si, ix = np.zeros((p, p)), np.arange(start, start+p_i)
start += p_i
Si[ix, ix.reshape(-1, 1)] = s['S']
smooths[var]['ix'], smooths[var]['Si'] = ix, Si
X.append(smooths[var]['X'])
S[i] = Si
Sj.append(s['S'])
ranks.append(np.linalg.matrix_rank(Si))
u = np.linalg.eigvals(s['S'])
ldS.append(np.log(u[u>np.finfo(float).eps]).sum())
self.X, self.Xp, self.y = np.concatenate(X, axis=1), Xp.values, y.values[:, 0]
self.S, self.Sj, self.ranks, self.ldS = S, Sj, ranks, ldS
self.f, self.smooths = family, smooths
self.ns, self.n_obs, self.nx = ns, self.X.shape[0], self.X.shape[1]
self.mp = self.nx - np.sum(self.ranks)
self.data = data
theta = np.zeros(self.ns+1)
for i, (var, s) in enumerate(smooths.items()):
ix = smooths[var]['ix']
a = self.S[i][ix, ix[:, None].T]
d = np.diag(self.X[:, ix].T.dot(self.X[:, ix]))
lam = (1.5 * (d / a)[a>0]).mean()
theta[i] = np.log(lam)
varnames += [f"log_smooth_{var}"]
theta[-1] = 1.0
varnames += ["log_scale"]
self.theta = theta
self.varnames = varnames
self.smooth_info = smooth_info
def get_wz(self, eta):
mu = self.f.inv_link(eta)
v = self.f.var_func(mu=mu)
dg = self.f.dinv_link(eta)
r = self.y - mu
a = 1.0 + r * (self.f.dvar_dmu(mu) / v + self.f.d2link(mu) * dg)
z = eta + r / (dg * a)
w = a * dg**2 / v
return z, w
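# Note (descriptive comment, not in the original source): z and w above are the
# standard working response and weights of penalized IRLS. For the Gaussian
# family with an identity link, dinv_link(eta) = 1, d2link(mu) = 0 and
# dvar_dmu(mu) = 0, so a = 1, z = y and w is constant, and pirls() collapses to
# a single penalized least-squares solve.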
def solve_pls(self, eta, S):
z, w = self.get_wz(eta)
Xw = self.X * w[:, None]
beta_new = np.linalg.solve(Xw.T.dot(self.X)+S, Xw.T.dot(z))
return beta_new
def pirls(self, alpha, n_iters=200, tol=1e-7):
beta = np.zeros(self.X.shape[1])
S = self.get_penalty_mat(alpha)
eta = self.X.dot(beta)
dev = self.f.deviance(self.y, mu=self.f.inv_link(eta)).sum()
success = False
for i in range(n_iters):
beta_new = self.solve_pls(eta, S)
eta_new = self.X.dot(beta_new)
dev_new = self.f.deviance(self.y, mu=self.f.inv_link(eta_new)).sum()
if dev_new > dev:
success=False
break
if abs(dev - dev_new) / dev_new < tol:
success = True
break
eta = eta_new
dev = dev_new
beta = beta_new
return beta, eta, dev, success, i
def get_penalty_mat(self, alpha):
Sa = np.einsum('i,ijk->jk', alpha, self.S)
return Sa
def logdetS(self, alpha, phi):
logdet = 0.0
for i, (r, lds) in enumerate(list(zip(self.ranks, self.ldS))):
logdet += r * np.log(alpha[i]/phi) + lds
return logdet
def grad_beta_rho(self, beta, alpha):
S = self.get_penalty_mat(alpha)
A = np.linalg.inv(self.hess_dev_beta(beta, S))
dbdr = np.zeros((beta.shape[0], alpha.shape[0]))
for i in range(self.ns):
Si = self.S[i]
dbdr[:, i] = -alpha[i] * A.dot(Si.dot(beta))*2.0
return dbdr
def hess_dev_beta(self, beta, S):
mu = self.f.inv_link(self.X.dot(beta))
v0, g1 = self.f.var_func(mu=mu), self.f.dlink(mu)
v1, g2 = self.f.dvar_dmu(mu), self.f.d2link(mu)
r = self.y - mu
w = (1.0 + r * (v1 / v0 + g2 / g1)) / (v0 * g1**2)
d2Ddb2 = 2.0 * (self.X * w[:, None]).T.dot(self.X) + 2.0 * S
return d2Ddb2
def reml(self, theta):
lam, phi = np.exp(theta[:-1]), np.exp(theta[-1])
S, X, y = self.get_penalty_mat(lam), self.X, self.y
XtX = X.T.dot(X)
beta = np.linalg.solve(XtX + S, X.T.dot(y))
r = y - X.dot(beta)
rss = r.T.dot(r)
bsb = beta.T.dot(S).dot(beta)
_, ldh = | np.linalg.slogdet(XtX / phi + S / phi) | numpy.linalg.slogdet |
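# Illustrative sketch (not part of the model above): the core update performed
# by solve_pls()/pirls() is a single penalized weighted least-squares solve,
# (X^T W X + S) beta = X^T W z with W = diag(w). The helper below repeats that
# step on stand-alone inputs; all names and sizes here are made up.
import numpy as np

def penalized_wls_step(X, z, w, S):
    # One PIRLS update: solve (X^T W X + S) beta = X^T W z.
    Xw = X * w[:, None]
    return np.linalg.solve(Xw.T.dot(X) + S, Xw.T.dot(z))

def _penalized_wls_example():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(50, 4))
    z = X @ np.array([1.0, -2.0, 0.5, 0.0]) + rng.normal(scale=0.1, size=50)
    w = np.ones(50)          # unit working weights
    S = 0.1 * np.eye(4)      # ridge-like penalty
    return penalized_wls_step(X, z, w, S)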
import sys
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# load vX and vY from a textfile
def loadTextFile(filename):
vX, vY = [], []
with open(filename, "r") as fh:
assert (next(fh).strip("\r\n") == "BSPLINE")
for line in fh:
if line[0] != "#":
ndim, numv, ndegree = [int(tmp) for tmp in line.split()]
break
assert (ndim == 2)
knots = [float(tmp) for tmp in next(fh).split()]
for line in fh:
x, y = [float(tmp) for tmp in line.split()]
vX.append(x)
vY.append(y)
assert (numv == len(vX))
return ndegree, knots, np.array(vX), np.array(vY)
def sweepbspline(args):
# log parameters
print(args)
d1, knots1, y1, z1 = loadTextFile(args.infile1)
x1 = np.zeros_like(y1)
q1 = np.hstack((x1[:, np.newaxis], y1[:, np.newaxis], z1[:, np.newaxis]))
numv1 = q1.shape[0]
k1 = d1 + 1
d2, knots2, x2, y2 = loadTextFile(args.infile2)
z2 = np.zeros_like(x2)
q2 = np.hstack((x2[:, np.newaxis], y2[:, np.newaxis], z2[:, np.newaxis]))
q2 = q2 - q2[0]
numv2 = q2.shape[0]
k2 = d2 + 1
q1 = np.tile(q1[:, np.newaxis, :], (1, numv2, 1))
q2 = np.tile(q2[np.newaxis, :, :], (numv1, 1, 1))
q = q1 + q2 # [numv1, numv2]
for j in range(k2 - 1, numv2):
q_tmp = np.zeros((numv1, k2, 50, 3))
u = np.linspace(0, 1, 50) * (knots2[j + 1] - knots2[j]) + knots2[j]
for i in range(j - k2 + 1, j + 1):
q_tmp[:, i - (j - k2 + 1), :, :] = q[:, i:i+1, :]
for r in range(1, k2):
h = k2 - r
for i in range(j, j - h, -1):
alpha = (u - knots2[i]) / (knots2[i + h] - knots2[i])
q_tmp[:, i - (j - k2 + 1)] = (1.0 - alpha)[np.newaxis, :, np.newaxis] * q_tmp[:, i - 1 - (j - k2 + 1)] + \
alpha[np.newaxis, :, np.newaxis] * q_tmp[:, i - (j - k2 + 1)]
if j == k2 - 1:
q_hat = q_tmp[:, k2 - 1, :, :]
else:
q_hat = np.concatenate((q_hat, q_tmp[:, k2 - 1, :, :]), axis=1) # [numv1, nump2, 3]
for j in range(k1 - 1, numv1):
p_tmp = np.zeros((q_hat.shape[1], k1, 50, 3))
u = np.linspace(0, 1, 50) * (knots1[j + 1] - knots1[j]) + knots1[j]
for i in range(j - k1 + 1, j + 1):
p_tmp[:, i - (j - k1 + 1), :, :] = q_hat[i, :, :][:, np.newaxis, :]
for r in range(1, k1):
h = k1 - r
for i in range(j, j - h, - 1):
alpha = (u - knots1[i]) / (knots1[i + h] - knots1[i])
p_tmp[:, i - (j - k1 + 1)] = (1.0 - alpha)[np.newaxis, :, np.newaxis] * p_tmp[:, i - 1 - (j - k1 + 1)] + \
alpha[np.newaxis, :, np.newaxis] * p_tmp[:, i - (j - k1 + 1)]
if j == k1 - 1:
p = p_tmp[:, k1 - 1, :, :]
else:
p = np.concatenate((p, p_tmp[:, k1 - 1, :, :]), axis=1)
p = | np.transpose(p, (1, 0, 2)) | numpy.transpose |
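# Illustrative sketch (not from the original script): the surface sweep above
# applies the de Boor recurrence once along each input curve. The stand-alone
# helper below evaluates a single point on one degree-d B-spline curve with the
# same recurrence; the names and example data are made up.
import numpy as np

def deboor_point(d, knots, ctrl, j, u):
    # Evaluate the curve at parameter u, assuming knots[j] <= u < knots[j + 1].
    k = d + 1
    q = [np.asarray(ctrl[i], dtype=float) for i in range(j - k + 1, j + 1)]
    for r in range(1, k):
        h = k - r
        for i in range(j, j - h, -1):
            alpha = (u - knots[i]) / (knots[i + h] - knots[i])
            q[i - (j - k + 1)] = (1.0 - alpha) * q[i - 1 - (j - k + 1)] \
                + alpha * q[i - (j - k + 1)]
    return q[k - 1]

def _deboor_example():
    # Quadratic curve (d=2) with a clamped knot vector and four control points.
    knots = [0.0, 0.0, 0.0, 0.5, 1.0, 1.0, 1.0]
    ctrl = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 2.0], [4.0, 0.0]])
    return deboor_point(2, knots, ctrl, j=2, u=0.25)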
"""Test functions.
This module implements several known mathematical functions, that can
be used to test RBFOpt.
Licensed under Revised BSD license, see LICENSE.
(C) Copyright Singapore University of Technology and Design 2014.
(C) Copyright International Business Machines Corporation 2017.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import math
import numpy as np
from rbfopt.rbfopt_black_box import RbfoptBlackBox
class branin:
"""
Branin function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((x[1] - (5.1/(4*math.pi*math.pi))*x[0]*x[0] +
5/math.pi*x[0] - 6)**2 + 10*(1-1/(8*math.pi)) *
math.cos(x[0]) +10)
return(value)
dimension = 2
var_lower = np.array([-5, 0])
var_upper = np.array([10, 15])
optimum_point = np.array([9.42477796, 2.47499998])
additional_optima = np.array([ [-3.14159265, 12.27500000],
[3.14159265, 2.27500000] ])
optimum_value = 0.397887357729739
var_type = np.array(['R'] * 2)
# -- end class
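# Quick sanity check (illustrative, not part of the RBFOpt test set): the value
# at the stored optimum should reproduce optimum_value up to rounding, e.g.
# abs(branin.evaluate(branin.optimum_point) - branin.optimum_value) < 1e-6.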
class hartman3:
"""
Hartman3 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
value = -math.fsum([ cls.c[i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j] - cls.p[j][i])**2
for j in range(3)]))
for i in range(4) ])
return(value)
a = [ [3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0] ]
p = [ [0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280] ]
c = [1.0, 1.2, 3.0, 3.2]
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([1, 1, 1])
optimum_point = np.array([0.1, 0.55592003, 0.85218259])
optimum_value = -3.8626347486217725
var_type = np.array(['R'] * 3)
# -- end class
class hartman6:
"""
Hartman6 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
value = -math.fsum([ cls.c[i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j] - cls.p[j][i])**2
for j in range(6)]))
for i in range(4) ])
return(value)
a = [ [10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00] ]
p = [ [0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381] ]
c = [1.0, 1.2, 3.0, 3.2]
dimension = 6
var_lower = np.array([0, 0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1, 1])
optimum_point = np.array([0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054])
optimum_value = -3.32236801141551
var_type = np.array(['R'] * 6)
# -- end class
class camel:
"""
Six-hump Camel function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 +
x[0]*x[1] + (-4 + 4*x[1]**2)*x[1]**2)
return(value)
dimension = 2
var_lower = np.array([-3, -2])
var_upper = np.array([3, 2])
optimum_point = np.array([0.08984201, -0.7126])
optimum_value = -1.0316284535
var_type = np.array(['R'] * 2)
# -- end class
class goldsteinprice:
"""
Goldstein & Price function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value= ((1 + (x[0] + x[1] + 1)**2 *
(19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] +
3*x[1]**2)) *
(30 + (2*x[0] - 3*x[1])**2 *
(18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] +
27*x[1]**2)))
return(value)
dimension = 2
var_lower = np.array([-2, -2])
var_upper = np.array([2, 2])
optimum_point = np.array([0.0, -1.0])
optimum_value = 3
var_type = np.array(['R'] * 2)
# -- end class
class shekel5:
"""
Shekel5 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(5) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.1531958509790
var_type = np.array(['R'] * 4)
# -- end class
class shekel7:
"""
Shekel7 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(7) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.4028188369303
var_type = np.array(['R'] * 4)
# -- end class
class shekel10:
"""
Shekel10 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(10) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0, 1.0, 2.0, 3.6],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.53628372621960
var_type = np.array(['R'] * 4)
# -- end class
class ex4_1_1:
"""
ex4_1_1 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==1)
value = (x[0]**6 - (52.0/25.0)*x[0]**5 + (39.0/80.0)*x[0]**4 +
(71.0/10.0)*x[0]**3 - (79.0/20.0)*x[0]**2 - x[0] +
1.0/10.0)
return(value)
dimension = 1
var_lower = np.array([-2])
var_upper = np.array([11])
optimum_point = np.array([-1.19131])
optimum_value = -7.487312360731
var_type = np.array(['R'])
# -- end class
class ex4_1_2:
"""
ex4_1_2 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==1)
a = [-500, 2.5, 1.666666666, 1.25, 1.0, 0.8333333, 0.714285714,
0.625, 0.555555555, 1.0, -43.6363636, 0.41666666, 0.384615384,
0.357142857, 0.3333333, 0.3125, 0.294117647, 0.277777777,
0.263157894, 0.25, 0.238095238, 0.227272727, 0.217391304,
0.208333333, 0.2, 0.192307692, 0.185185185, 0.178571428,
0.344827586, 0.6666666, -15.48387097, 0.15625, 0.1515151,
0.14705882, 0.14285712, 0.138888888, 0.135135135, 0.131578947,
0.128205128, 0.125, 0.121951219, 0.119047619, 0.116279069,
0.113636363, 0.1111111, 0.108695652, 0.106382978, 0.208333333,
0.408163265, 0.8]
value = math.fsum([a[i]*x[0]**(i+1) for i in range(50)])
return(value)
dimension = 1
var_lower = np.array([1])
var_upper = np.array([2])
optimum_point = np.array([1.09106])
optimum_value = -663.4993631230575
var_type = np.array(['R'] * 1)
# -- end class
class ex8_1_1:
"""
ex8_1_1 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = np.cos(x[0])*np.sin(x[1]) - x[0]/(x[1]**2+1)
return(value)
dimension = 2
var_lower = np.array([-1, -1])
var_upper = np.array([2, 1])
optimum_point = np.array([2.0, 0.105783])
optimum_value = -2.0218067833
var_type = | np.array(['R'] * 2) | numpy.array |
import numpy as np
import pytest
import oceansat
def test_laws_2000_scalar():
eprod = oceansat.export_production.laws_2000(pprod=100, sst=15)
assert eprod > 100/100
assert eprod <= 100
assert eprod == pytest.approx(30.78, 0.1)
def test_laws_2000_array():
pprod = | np.full([100,100], 100) | numpy.full |
import numpy as np
def dltnorm(src_pts, target_pts):
# Compute a similarity transformation T and T_prime to normalize src_pts and target_pts
normalized_src_pts, T = normalization(src_pts)
normalized_target_pts, T_prime = normalization(target_pts)
# Construct A Matrix from pairs a and b
A = []
for i in range(0, len(normalized_src_pts)):
ax, ay = normalized_src_pts[i][0], normalized_src_pts[i][1]
bx, by = normalized_target_pts[i][0], normalized_target_pts[i][1]
A.append([-ax, -ay, -1, 0, 0, 0, bx*ax, bx*ay, bx])
A.append([0, 0, 0, -ax, -ay, -1, by*ax, by*ay, by])
# Compute SVD for A
A = np.asarray(A)
U, S, V = np.linalg.svd(A)
# The solution is the right singular vector for the smallest singular value, i.e. the last row of the V returned by numpy's svd (a length-9 vector)
L = V[-1, :]
# Divide by last element as we estimate the homography up to a scale
L = L/V[-1, -1]
H_tilde = L.reshape(3, 3)
# Denormalization: denormalize the homography back
H = np.dot(np.dot(np.linalg.pinv(T_prime), H_tilde), T)
H = H/H[-1, -1]
return H
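# Illustrative usage sketch (not part of the original module): estimate a
# homography from four made-up point correspondences and push one point
# through it. The coordinates here are arbitrary demonstration values.
def _dltnorm_example():
    src = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    dst = np.array([[10.0, 10.0], [30.0, 12.0], [28.0, 35.0], [8.0, 32.0]])
    H = dltnorm(src, dst)
    p = np.array([0.5, 0.5, 1.0])   # homogeneous source point
    q = H.dot(p)
    return q[:2] / q[2]             # mapped point in target coordinates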
def normalization(pts):
N = len(pts)
mean = np.mean(pts, 0)
s = | np.linalg.norm((pts-mean), axis=1) | numpy.linalg.norm |
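# The `normalization` helper above is cut off at the row boundary. One common
# way to complete it (assumption: standard Hartley normalization that rescales
# the mean distance from the centroid to sqrt(2); the original author's scaling
# may differ) is sketched below.
import numpy as np

def normalization_sketch(pts):
    pts = np.asarray(pts, dtype=float)
    mean = pts.mean(axis=0)
    d = np.linalg.norm(pts - mean, axis=1).mean()   # mean distance to centroid
    s = np.sqrt(2) / d
    T = np.array([[s, 0.0, -s * mean[0]],
                  [0.0, s, -s * mean[1]],
                  [0.0, 0.0, 1.0]])
    pts_h = np.column_stack([pts, np.ones(len(pts))])
    normalized = (T @ pts_h.T).T[:, :2]
    return normalized, T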
# test_iddata.py - Unittest for the iddata object
#
# Code author: [<NAME> - <EMAIL>]
# Last update: 10th January 2021, by <EMAIL>
#
# Copyright (c) [2017-2021] <NAME> [<EMAIL>]. All rights reserved.
# This file is part of PythonVRFT.
# PythonVRFT is free software: you can redistribute it and/or modify
# it under the terms of the MIT License. You should have received a copy of
# the MIT License along with PythonVRFT.
# If not, see <https://opensource.org/licenses/MIT>.
#
import numpy as np
import scipy.signal as scipysig
from unittest import TestCase
from vrft.iddata import iddata
from vrft.extended_tf import ExtendedTF
class TestIDData(TestCase):
def test_type(self):
a = iddata(0.0, 0.0, 0.0, [0])
with self.assertRaises(ValueError):
a.check()
a = iddata(0.0, [1], 0.0, [0])
with self.assertRaises(ValueError):
a.check()
a = iddata(np.zeros(10), 1, 0.0, [0])
with self.assertRaises(ValueError):
a.check()
a = iddata([0 for i in range(10)], [0 for i in range(10)], 1.0, [0])
self.assertTrue(a.check())
a = iddata(np.zeros(10), | np.zeros(10) | numpy.zeros |
import CSSS
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression as LR
import copy
from Custom_Functions.error_functions import rmse_pos
##### Davide Modifications:
# I enriched the calcPerformanceMetrics routine with new performance metrics
# I added the possibility to specify which IDs do not have solar and constrain their generation to 0
# I added a function Mape_mod
class SolarDisagg_IndvHome(CSSS.CSSS):
def __init__(self, netloads, solarregressors, loadregressors, tuningregressors=None, names=None, nosolar_ids = None):
"""
:param netloads: np.array of net loads at each home, with columns corresponding to entries of "names" if
available.
:param solarregressors: np.array of solar regressors (N_s X T)
:param loadregressors: np.array of load regressors (N_l x T)
:param tuningregressors:
:param names:
"""
## Find aggregate net load, and initialize problem.
agg_net_load = np.sum(netloads, axis = 1)
CSSS.CSSS.__init__(self, agg_net_load)
## If no names are input, create names based on id in vector.
self.N, self.M = netloads.shape
if names is None:
self.names = [str(i) for i in np.arange(self.M)]
else:
self.names = names
## Store net loads as a dictionary
self.netloads = {}
for i in range(self.M):
name = self.names[i]
self.netloads[name] = netloads[:,i]
## If no tuning regressors are input, use an intercept only
if tuningregressors is None:
tuningregressors = np.ones((self.N,1))
## Store solar and load regressors, solar regressors, and begin true solar dict
self.solarRegressors = solarregressors
self.loadRegressors = loadregressors
self.tuningRegressors = tuningregressors
self.trueValues = {}
self.nosolar_ids = nosolar_ids
## Cycle through each net load, and create sources.
for source_name in self.names:
self.addSource(regressor=solarregressors, name = source_name, alpha = 1)
## Add constraints that solar generation cannot exceed zero or net load.
self.addConstraint( self.models[source_name]['source'] <= np.array(self.netloads[source_name]) )
self.addConstraint( self.models[source_name]['source'] <= 0 )
self.addConstraint( self.models[source_name]['theta'] >= 0 ) ####################
##################
if self.nosolar_ids is not None:
for source_name in self.nosolar_ids:
self.addConstraint( self.models[source_name]['source'] == 0 )
##################
## Add the aggregate load source
self.addSource(regressor=loadregressors, name = 'AggregateLoad', alpha = 1)
self.addConstraint( self.models['AggregateLoad']['source'] > 0 )
def Solar_var_norm(self):
return(None)
## Placeholder for variance prediction for tuning
def Total_NL_var(self):
return(None)
## Placeholder for variance prediction for tuning
def addTrueValue(self, trueValue, name):
## Function to add true solar for a given model
## Check that true value is correct number of dimensions
trueValue = trueValue.squeeze()
if not (trueValue.shape == (self.N,)):
raise Exception('True value of a solar or load signal must be one dimensional and length N = %d' % self.N)
if name not in (self.names + ['AggregateLoad']):
raise Exception('Must input a valid household identifier or \"AggregateLoad\"')
## Add True Value
self.trueValues[name] = trueValue
return(None)
def calcPerformanceMetrics(self, dropzeros = False):
## Function to calculate performance metrics
# Dropping zeros is intended to remove nighttime solar.
df = pd.DataFrame()
df['models'] = self.models.keys()
df['rmse'] = np.zeros(df.shape[0]) * np.nan
df['cv'] = np.zeros(df.shape[0]) * np.nan
df['mae'] = np.zeros(df.shape[0]) * np.nan
df['pmae'] = np.zeros(df.shape[0]) * np.nan
df['mbe'] = np.zeros(df.shape[0]) * np.nan
df['mean'] = np.zeros(df.shape[0]) * np.nan
df['MAPE'] = np.zeros(df.shape[0]) * np.nan
df['mae_max']= np.zeros(df.shape[0]) * np.nan
df['cv_max'] = np.zeros(df.shape[0]) * np.nan
df['max_sol_pred'] = np.zeros(df.shape[0]) * np.nan
df['cv_pos'] = np.zeros(df.shape[0]) * np.nan
df['rmse_pos'] = np.zeros(df.shape[0]) * np.nan
df['mae_pos'] = | np.zeros(df.shape[0]) | numpy.zeros |
#########################################################################
#
# clustering.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2001-2011 <NAME>
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# <NAME>, <EMAIL>
#
'''
Unsupervised clustering algorithms.
'''
from __future__ import division, print_function, unicode_literals
import numpy
from .classifiers import Classifier
from warnings import warn
def L1(v1, v2):
'Returns L1 distance between 2 rank-1 arrays.'
return numpy.sum(abs((v1 - v2)))
def L2(v1, v2):
'Returns Euclidean distance between 2 rank-1 arrays.'
delta = v1 - v2
return numpy.sqrt(numpy.dot(delta, delta))
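# Worked example: for v1 = numpy.array([1, 2]) and v2 = numpy.array([4, 6]),
# L1(v1, v2) = |1-4| + |2-6| = 7 and L2(v1, v2) = sqrt(3**2 + 4**2) = 5.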
class KmeansClusterer(Classifier):
'''An unsupervised classifier using an iterative clustering algorithm'''
def __init__(self, nclusters=10, maxIter=20, endCondition=None,
distanceMeasure=L1):
'''
ARGUMENTS:
nclusters Number of clusters to create. Default is 8
maxIter Max number of iterations. Default is 20
endCondition Optional comparison function. This should be a
function which takes 2 MxN NumPy arrays as its
arguments and returns non-zero when clustering
is to be terminated. The two arguments are the
cluster maps for the previous and current cluster
cycle, respectively.
distanceMeasure The distance measure to use for comparison. The
default is the L1 distance. For Euclidean
distance, specify L2 (no quotes).
'''
self.nclusters = nclusters
self.maxIterations = maxIter
self.endCondition = endCondition
self.distanceMeasure = distanceMeasure
def classify_image(self, image, startClusters=None, iterations=None):
'''
Performs iterative self-organizing clustering of image data.
USAGE: (clMap, centers) = cl.classify_image(image
[, startClusters = None]
[, iterations = None])
ARGUMENTS:
image A SpyFile or an MxNxB NumPy array
startClusters Initial cluster centers. This must be an
nclusters x B array.
iterations If this argument is passed and is a list object,
each intermediate cluster map is appended to
the list.
RETURN VALUES:
clMap An MxN array whose values are the indices of the
cluster for the corresponding element of image.
centers An nclusters x B array of cluster centers.
'''
return isoCluster(
image, self.nclusters, self.maxIterations, startClusters,
self.endCondition, self.distanceMeasure, iterations)
def kmeans(image, nclusters=10, max_iterations=20, **kwargs):
'''
Performs iterative clustering using the k-means algorithm.
Arguments:
`image` (:class:`numpy.ndarray` or :class:`spectral.Image`):
The `MxNxB` image on which to perform clustering.
`nclusters` (int) [default 10]:
Number of clusters to create. The number produced may be less than
`nclusters`.
`max_iterations` (int) [default 20]:
Max number of iterations to perform.
Keyword Arguments:
`start_clusters` (:class:`numpy.ndarray`) [default None]:
`nclusters x B` array of initial cluster centers. If not provided,
initial cluster centers will be spaced evenly along the diagonal of
the N-dimensional bounding box of the image data.
`compare` (callable object) [default None]:
Optional comparison function. `compare` must be a callable object
that takes 2 `MxN` :class:`numpy.ndarray` objects as its arguments
and returns non-zero when clustering is to be terminated. The two
arguments are the cluster maps for the previous and current cluster
cycle, respectively.
`distance` (callable object) [default :func:`~spectral.clustering.L2`]:
The distance measure to use for comparison. The default is to use
**L2** (Euclidean) distance. For Manhattan distance, specify
:func:`~spectral.clustering.L1`.
`frames` (list) [default None]:
If this argument is given and is a list object, each intermediate
cluster map is appended to the list.
Returns a 2-tuple containing:
`class_map` (:class:`numpy.ndarray`):
An `MxN` array whose values are the indices of the cluster for the
corresponding element of `image`.
`centers` (:class:`numpy.ndarray`):
An `nclusters x B` array of cluster centers.
Iterations are performed until clusters converge (no pixels reassigned
between iterations), `maxIterations` is reached, or `compare` returns
nonzero. If :exc:`KeyboardInterrupt` is generated (i.e., CTRL-C pressed)
while the algorithm is executing, clusters are returned from the previously
completed iteration.
'''
import spectral
import numpy
if isinstance(image, numpy.ndarray):
return kmeans_ndarray(*(image, nclusters, max_iterations), **kwargs)
status = spectral._status
# defaults for kwargs
start_clusters = None
compare = None
distance = L2
iterations = None
for (key, val) in list(kwargs.items()):
if key == 'start_clusters':
start_clusters = val
elif key == 'compare':
compare = val
elif key == 'distance':
if val in (L1, 'L1'):
distance = L1
elif val in (L2, 'L2'):
distance = L2
else:
raise ValueError('Unrecognized keyword argument.')
elif key == 'frames':
if not hasattr(val, 'append'):
raise TypeError('"frames" keyword argument must have "append"'
'attribute.')
iterations = frames
else:
raise NameError('Unsupported keyword argument.')
(nrows, ncols, nbands) = image.shape
clusters = numpy.zeros((nrows, ncols), int)
old_clusters = numpy.copy(clusters)
if start_clusters is not None:
assert (start_clusters.shape[0] == nclusters), 'There must be \
nclusters clusters in the startCenters array.'
centers = numpy.array(start_clusters)
else:
print('Initializing clusters along diagonal of N-dimensional bounding box.')
centers = numpy.empty((nclusters, nbands), float)
boxMin = image[0, 0]
boxMax = image[0, 0]
for i in range(nrows):
for j in range(ncols):
x = image[i, j]
boxMin = numpy.where(boxMin < x, boxMin, x)
boxMax = numpy.where(boxMax > x, boxMax, x)
boxMin = boxMin.astype(float)
boxMax = boxMax.astype(float)
delta = (boxMax - boxMin) / (nclusters - 1)
for i in range(nclusters):
centers[i] = boxMin.astype(float) + i * delta
itnum = 1
while (itnum <= max_iterations):
try:
status.display_percentage('Iteration %d...' % itnum)
# Assign all pixels
for i in range(nrows):
status.update_percentage(float(i) / nrows * 100.)
for j in range(ncols):
minDist = 1.e30
for k in range(nclusters):
dist = distance(image[i, j], centers[k])
if (dist < minDist):
clusters[i, j] = k
minDist = dist
# Update cluster centers
sums = numpy.zeros((nclusters, nbands), 'd')
counts = ([0] * nclusters)
for i in range(nrows):
for j in range(ncols):
counts[clusters[i, j]] += 1
sums[clusters[i, j]] += image[i, j]
old_centers = centers[:]
for i in range(nclusters):
if (counts[i] > 0):
centers[i] = sums[i] / counts[i]
centers = numpy.array(centers)
if iterations is not None:
iterations.append(clusters)
if compare and compare(old_clusters, clusters):
status.end_percentage('done.')
break
else:
nChanged = numpy.sum(clusters != old_clusters)
if nChanged == 0:
status.end_percentage('0 pixels reassigned.')
break
else:
status.end_percentage('%d pixels reassigned.' \
% (nChanged))
old_clusters = clusters
old_centers = centers
clusters = numpy.zeros((nrows, ncols), int)
itnum += 1
except KeyboardInterrupt:
print("KeyboardInterrupt: Returning clusters from previous iteration")
return (old_clusters, old_centers)
print('kmeans terminated with', len(set(old_clusters.ravel())), \
'clusters after', itnum - 1, 'iterations.', file=status)
return (old_clusters, centers)
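# Illustrative usage sketch (assumes SPy is installed and an image file is
# available; the filename below is a placeholder, not a bundled sample):
#
#     import spectral
#     img = spectral.open_image('some_image.lan').load()
#     (class_map, centers) = kmeans(img, nclusters=10, max_iterations=20)
#     # class_map[i, j] is the cluster index of pixel (i, j); centers holds
#     # one mean spectrum per cluster.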
def kmeans_ndarray(image, nclusters=10, max_iterations=20, **kwargs):
'''
Performs iterative clustering using the k-means algorithm.
Arguments:
`image` (:class:`numpy.ndarray` or :class:`spectral.Image`):
The `MxNxB` image on which to perform clustering.
`nclusters` (int) [default 10]:
Number of clusters to create. The number produced may be less than
`nclusters`.
`max_iterations` (int) [default 20]:
Max number of iterations to perform.
Keyword Arguments:
`start_clusters` (:class:`numpy.ndarray`) [default None]:
`nclusters x B` array of initial cluster centers. If not provided,
initial cluster centers will be spaced evenly along the diagonal of
the N-dimensional bounding box of the image data.
`compare` (callable object) [default None]:
Optional comparison function. `compare` must be a callable object
that takes 2 `MxN` :class:`numpy.ndarray` objects as its arguments
and returns non-zero when clustering is to be terminated. The two
arguments are the cluster maps for the previous and current cluster
cycle, respectively.
`distance` (callable object) [default :func:`~spectral.clustering.L2`]:
The distance measure to use for comparison. The default is to use
**L2** (Euclidean) distance. For Manhattan distance, specify
:func:`~spectral.clustering.L1`.
`frames` (list) [default None]:
If this argument is given and is a list object, each intermediate
cluster map is appended to the list.
Returns a 2-tuple containing:
`class_map` (:class:`numpy.ndarray`):
        An `MxN` array whose values are the indices of the cluster for the
corresponding element of `image`.
`centers` (:class:`numpy.ndarray`):
An `nclusters x B` array of cluster centers.
Iterations are performed until clusters converge (no pixels reassigned
between iterations), `max_iterations` is reached, or `compare` returns
nonzero. If :exc:`KeyboardInterrupt` is generated (i.e., CTRL-C pressed)
while the algorithm is executing, clusters are returned from the previously
completed iteration.
'''
import spectral
import numpy as np
from spectral.algorithms.spymath import has_nan, NaNValueError
if has_nan(image):
raise NaNValueError('Image data contains NaN values.')
status = spectral._status
# defaults for kwargs
start_clusters = None
compare = None
distance = L2
iterations = None
for (key, val) in list(kwargs.items()):
if key == 'start_clusters':
start_clusters = val
elif key == 'compare':
compare = val
elif key == 'distance':
if val in (L1, 'L1'):
distance = L1
elif val in (L2, 'L2'):
distance = L2
else:
raise ValueError('Unrecognized keyword argument.')
elif key == 'frames':
if not hasattr(val, 'append'):
raise TypeError('"frames" keyword argument must have "append"'
'attribute.')
iterations = val
else:
raise NameError('Unsupported keyword argument.')
(nrows, ncols, nbands) = image.shape
N = nrows * ncols
image = image.reshape((N, nbands))
clusters = numpy.zeros((N,), int)
if start_clusters is not None:
assert (start_clusters.shape[0] == nclusters), 'There must be \
nclusters clusters in the startCenters array.'
centers = numpy.array(start_clusters)
else:
print('Initializing clusters along diagonal of N-dimensional bounding box.')
boxMin = np.amin(image, 0)
boxMax = np.amax(image, 0)
delta = (boxMax - boxMin) / (nclusters - 1)
centers = np.empty((nclusters, nbands), float)
for i in range(nclusters):
centers[i] = boxMin + i * delta
distances = np.empty((N, nclusters), float)
old_centers = np.array(centers)
clusters = np.zeros((N,), int)
old_clusters = np.copy(clusters)
diffs = np.empty_like(image, dtype=np.float64)
itnum = 1
while (itnum <= max_iterations):
try:
status.display_percentage('Iteration %d...' % itnum)
# Assign all pixels
for i in range(nclusters):
diffs = np.subtract(image, centers[i], out=diffs)
if distance == L2:
distances[:, i] = np.einsum('ij,ij->i', diffs, diffs)
else:
diffs = np.abs(diffs, out=diffs)
distances[:, i] = np.einsum('ij->i', diffs)
            clusters[:] = np.argmin(distances, 1)
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function # py2.6 with_statement
import sys
import pprint
import h5py
import numpy as np
import os.path
# date related stuff
import datetime
import dateutil
import dateutil.tz
import dateutil.parser
import arrow
# compatibility
import future
from future.utils import iteritems
from builtins import range # range and switch xrange -> range
# from past.builtins import xrange # later, move to from builtins import
import edflib
import eeghdf
# really need to check the original data type and then save as that datatype along with the necessary conversion factors
# so can convert voltages on own
# try with float32 instead?
# LPCH often uses these labels for electrodes
LPCH_COMMON_1020_LABELS = [
'Fp1',
'Fp2',
'F3',
'F4',
'C3',
'C4',
'P3',
'P4',
'O1',
'O2',
'F7',
'F8',
'T3',
'T4',
'T5',
'T6',
'Fz',
'Cz',
'Pz',
'E',
'PG1',
'PG2',
'A1',
'A2',
'T1',
'T2',
'X1',
'X2',
'X3',
'X4',
'X5',
'X6',
'X7',
'EEG Mark1',
'EEG Mark2',
'Events/Markers']
# common 10-20 extended clinical (T1/T2 instead of FT9/FT10)
# will need to specify these as bytes I suppose (or is this ok in utf-8 given the ascii basis)
# keys should be all one case (say upper)
lpch2edf_fixed_len_labels = dict(
FP1='EEG Fp1 ',
F7='EEG F7 ',
T3='EEG T3 ',
T5='EEG T5 ',
O1='EEG O1 ',
F3='EEG F3 ',
C3='EEG C3 ',
P3='EEG P3 ',
FP2='EEG Fp2 ',
F8='EEG F8 ',
T4='EEG T4 ',
T6='EEG T6 ',
O2='EEG O2 ',
F4='EEG F4 ',
C4='EEG C4 ',
P4='EEG P4 ',
CZ='EEG Cz ',
FZ='EEG Fz ',
PZ='EEG Pz ',
T1='EEG FT9 ', # maybe I should map this to FT9/T1
T2='EEG FT10 ', # maybe I should map this to FT10/T2
A1='EEG A1 ',
A2='EEG A2 ',
# these are often (?always) EKG at LPCH, note edfspec says use ECG instead
# of EKG
X1='ECG X1 ', # is this invariant? usually referenced to A1
# this is sometimes ECG but not usually (depends on how squirmy)
X2='X2 ',
PG1='EEG Pg1 ',
PG2='EEG Pg2 ',
# now the uncommon ones
NZ='EEG Nz ',
FPZ='EEG Fpz ',
AF7='EEG AF7 ',
AF8='EEG AF8 ',
AF3='EEG AF3 ',
AFz='EEG AFz ',
AF4='EEG AF4 ',
F9='EEG F9 ',
# F7
F5='EEG F5 ',
# F3 ='EEG F3 ',
F1='EEG F1 ',
# Fz
F2='EEG F2 ',
# F4
F6='EEG F6 ',
# F8
F10='EEG F10 ',
FT9='EEG FT9 ',
FT7='EEG FT7 ',
FC5='EEG FC5 ',
FC3='EEG FC3 ',
FC1='EEG FC1 ',
FCz='EEG FCz ',
FC2='EEG FC2 ',
FC4='EEG FC4 ',
FC6='EEG FC6 ',
FT8='EEG FT8 ',
FT10='EEG FT10 ',
T9='EEG T9 ',
T7='EEG T7 ',
C5='EEG C5 ',
# C3 above
C1='EEG C1 ',
# Cz above
C2='EEG C2 ',
# C4 ='EEG C4 ',
C6='EEG C6 ',
T8='EEG T8 ',
T10='EEG T10 ',
# A2
# T3
# T4
# T5
# T6
TP9='EEG TP9 ',
TP7='EEG TP7 ',
CP5='EEG CP5 ',
CP3='EEG CP3 ',
CP1='EEG CP1 ',
CPZ='EEG CPz ',
CP2='EEG CP2 ',
CP4='EEG CP4 ',
CP6='EEG CP6 ',
TP8='EEG TP8 ',
TP10='EEG TP10 ',
P9='EEG P9 ',
P7='EEG P7 ',
P5='EEG P5 ',
# P3
P1='EEG P1 ',
# Pz
P2='EEG P2 ',
# P4
P6='EEG P6 ',
P8='EEG P8 ',
P10='EEG P10 ',
PO7='EEG PO7 ',
PO3='EEG PO3 ',
POZ='EEG POz ',
PO4='EEG PO4 ',
PO8='EEG PO8 ',
# O1
OZ='EEG Oz ',
# O2
IZ='EEG Iz ',
)
lpch2edf_fixed_len_labels
# print("lpch2edf_fixed_len_labels::\n")
# pprint.pprint(lpch2edf_fixed_len_labels)
LPCH_TO_STD_LABELS_STRIP = {k: v.strip()
for k, v in iteritems(lpch2edf_fixed_len_labels)}
# print('LPCH_TO_STD_LABELS_STRIP::\n')
# pprint.pprint(LPCH_TO_STD_LABELS_STRIP)
LPCH_COMMON_1020_LABELS_to_EDF_STANDARD = {
}
def normalize_lpch_signal_label(label):
label = label.replace('-REF','')
label = label.replace('-LE','')
uplabel = label.upper()
if uplabel in LPCH_TO_STD_LABELS_STRIP:
return LPCH_TO_STD_LABELS_STRIP[uplabel]
else:
return label
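# Illustrative examples (assumed, based on the mapping table above):
#   normalize_lpch_signal_label('Fp1-REF') -> 'EEG Fp1'
#   normalize_lpch_signal_label('X9-LE')   -> 'X9'   (unknown labels pass through unchanged)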
def edf_block_iter_generator(
edf_file, nsamples, samples_per_chunk, dtype='int32'):
"""
factory to produce generators for iterating through an edf file and filling
up an array from the edf with the signal data starting at 0. You choose the
number of @samples_per_chunk, and number of samples to do in total
@nsamples as well as the dtype. 'int16' is reasonable as well 'int32' will
handle everything though
it yields -> (numpy_buffer, mark, num)
numpy_buffer,
mark, which is where in the file in total currently reading from
num -- which is the number of samples in the buffer (per signal) to transfer
"""
nchan = edf_file.signals_in_file
# 'int32' will work for int16 as well
    buf = np.zeros((nchan, samples_per_chunk), dtype=dtype)
"""
A module containing unit tests for the `linalg` module.
Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
import pytest
import numpy as np
from tweakwcs import linalg, linearfit
@pytest.mark.parametrize('tp,expected', [
(np.float16, False),
(np.longdouble, True),
])
def test_longdouble_cmp(tp, expected):
assert linalg._is_longdouble_lte_flt_type(tp) == expected
def test_inv_order2():
feps = 1000 * np.finfo(np.double).eps
# Generate a 2D rotation/scale/skew matrix + some random noise.
# The reason for this complicated approach is to avoid creating
# singular matrices.
angle1 = 360 * np.random.random()
angle2 = angle1 + 50 * (np.random.random() - 0.5)
scale1 = 0.7 + 0.6 * np.random.random()
scale2 = 0.7 + 0.6 * np.random.random()
a = linearfit.build_fit_matrix((angle1, angle2), (scale1, scale2))
# invert using numpy.linalg:
use_numpy = linalg._USE_NUMPY_LINALG_INV
linalg._USE_NUMPY_LINALG_INV = True
try:
x = linalg.inv(a)
r = np.identity(2) - np.dot(a, x)
# Use Morris Newman's formula to asses the quality of the inversion
# (see https://nvlpubs.nist.gov/nistpubs/jres/78B/jresv78Bn2p65_A1b.pdf
# January 3, 1974).
err = 2.0 * np.abs(np.dot(x, r)).max() / (1.0 - np.abs(r).max())
assert err < feps
finally:
linalg._USE_NUMPY_LINALG_INV = use_numpy
# invert using tweakwcs.linalg:
linalg._USE_NUMPY_LINALG_INV = False
try:
x = linalg.inv(a)
        r = np.identity(2) - np.dot(a, x)
import torch
import numpy as np
import csv
# cutout
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img = img * mask
return img
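# Hedged usage sketch (not part of the original file): applying Cutout to a single
# CHW image tensor. The image size and hole parameters are illustrative assumptions.
def _cutout_usage_example():
    img = torch.rand(3, 32, 32)               # fake CIFAR-sized image
    augmented = Cutout(n_holes=1, length=16)(img)
    return augmented.shape                    # still (3, 32, 32); up to a 16x16 patch is zeroed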
# cutmix
"""输入为:样本的size和生成的随机lamda值"""
def rand_bbox(size, lam):
W = size[2]
H = size[3]
"""1.论文里的公式2,求出B的rw,rh"""
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
"""2.论文里的公式2,求出B的rx,ry(bbox的中心点)"""
cx = | np.random.randint(W) | numpy.random.randint |
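    # The rest of rand_bbox is missing from this excerpt; the lines below are a
    # hedged reconstruction of the standard CutMix bounding-box computation
    # (random center clipped to the image), not necessarily the author's verbatim code.
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2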
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import pandas as pd
F0 = np.zeros((4,3))
F0[0,2] = 10
F0[3,2] = 6
Fa = np.c_[F0, np.zeros((4,6-2))] # np.c_ appends columns
Fa = np.r_[Fa, [np.ones(np.shape(Fa)[1])]] # np.r_ appends rows
print(np.shape(F0)[0])
print(F0, F0[:,1])
print(Fa)
T = 10
F = np.c_[F0, np.zeros((np.shape(F0)[0],T - np.shape(F0)[1]))]
print(np.zeros((3,1)))
A = np.array([[2,3],
[1,4]])
B = np.array([[1,4],
[1,1]]).T
C = np.array([[1,4],
[1,1]])
D = B[:,1] - np.dot(A[0,:],B)
import numpy as np
from numpy.lib.arraysetops import isin
import paddle
from ..data_processor.readers import preprocess_inputs, preprocess_image, read_image, restore_image
from ..data_processor.visualizer import sp_weights_to_image_explanation, overlay_threshold, save_image, show_vis_explanation
from ._lime_base import LimeBase
from .abc_interpreter import Interpreter, InputOutputInterpreter
class LIMECVInterpreter(InputOutputInterpreter):
"""
LIME Interpreter for CV tasks.
More details regarding the LIME method can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(self,
paddle_model,
use_cuda=None,
device='gpu:0',
model_input_shape=[3, 224, 224],
random_seed=None) -> None:
"""
Args:
paddle_model (callable): A model with ``forward`` and possibly ``backward`` functions.
device (str): The device used for running `paddle_model`, options: ``cpu``, ``gpu:0``, ``gpu:1`` etc.
use_cuda (bool): Would be deprecated soon. Use ``device`` directly.
model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
"""
InputOutputInterpreter.__init__(self, paddle_model, device, use_cuda)
self.model_input_shape = model_input_shape
# use the default LIME setting
self.lime_base = LimeBase(random_state=random_seed)
self.lime_results = {}
def interpret(self,
data,
interpret_class=None,
num_samples=1000,
batch_size=50,
visual=True,
save_path=None):
"""
Main function of the interpreter.
Args:
data (str): The input file path.
interpret_class (int, optional): The index of class to interpret. If None, the most likely label will be used. Default: None
num_samples (int, optional): LIME sampling numbers. Larger number of samples usually gives more accurate interpretation. Default: 1000
batch_size (int, optional): Number of samples to forward each time. Default: 50
visual (bool, optional): Whether or not to visualize the processed image. Default: True
save_path (str, optional): The path to save the processed image. If None, the image will not be saved. Default: None
Returns:
[dict]: LIME results: {interpret_label_i: weights on features}
"""
# preprocess_inputs
if isinstance(data, str):
crop_size = self.model_input_shape[1]
target_size = int(self.model_input_shape[1] * 1.143)
img = read_image(data, target_size, crop_size)
else:
if len(data.shape) == 3:
data = np.expand_dims(data, axis=0)
if np.issubdtype(data.dtype, np.integer):
img = data
else:
# for later visualization
img = restore_image(data.copy())
data = preprocess_image(img)
        data_type = np.array(data)
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.utils import _triple
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
import math
import os
import datetime
from matplotlib import pyplot as plt
class SpatioTemporalConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
super(SpatioTemporalConv, self).__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
spatial_stride = [1, stride[1], stride[2]]
spatial_padding = [0, padding[1], padding[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
temporal_stride = [stride[0], 1, 1]
temporal_padding = [padding[0], 0, 0]
intermed_channels = int(math.floor((kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels)/ \
(kernel_size[1]* kernel_size[2] * in_channels + kernel_size[0] * out_channels)))
self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
stride=spatial_stride, padding=spatial_padding, bias=bias)
self.bn = nn.BatchNorm3d(intermed_channels)
self.relu = nn.ReLU()
self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size,
stride=temporal_stride, padding=temporal_padding, bias=bias)
def forward(self, x):
x = self.relu(self.bn(self.spatial_conv(x)))
x = self.temporal_conv(x)
return x
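# Worked example of the parameter-matching rule above (assumed values, for
# illustration only): with a 3x3x3 kernel and 64 -> 64 channels,
#   intermed_channels = floor((3*3*3*64*64) / (3*3*64 + 3*64)) = floor(110592 / 768) = 144,
# so the factored (2+1)D block uses 144 intermediate channels to roughly match the
# parameter count of the corresponding full 3D convolution.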
class SpatioTemporalResBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, downsample=False):
super(SpatioTemporalResBlock, self).__init__()
self.downsample = downsample
padding = kernel_size//2
if self.downsample:
self.downsampleconv = SpatioTemporalConv(in_channels, out_channels, 1, stride=2)
self.downsamplebn = nn.BatchNorm3d(out_channels)
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding, stride=2)
else:
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding)
self.bn1 = nn.BatchNorm3d(out_channels)
self.relu1 = nn.ReLU()
self.conv2 = SpatioTemporalConv(out_channels, out_channels, kernel_size, padding=padding)
self.bn2 = nn.BatchNorm3d(out_channels)
self.outrelu = nn.ReLU()
def forward(self, x):
res = self.relu1(self.bn1(self.conv1(x)))
res = self.bn2(self.conv2(res))
if self.downsample:
x = self.downsamplebn(self.downsampleconv(x))
return self.outrelu(x + res)
class SpatioTemporalResLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, layer_size, block_type=SpatioTemporalResBlock, downsample=False):
super(SpatioTemporalResLayer, self).__init__()
self.block1 = block_type(in_channels, out_channels, kernel_size, downsample)
self.blocks = nn.ModuleList([])
for i in range(layer_size - 1):
self.blocks += [block_type(out_channels, out_channels, kernel_size)]
def forward(self, x):
x = self.block1(x)
for block in self.blocks:
x = block(x)
return x
class R2Plus1DNet(nn.Module):
def __init__(self, layer_sizes, block_type=SpatioTemporalResBlock, p = 0.2):
super(R2Plus1DNet, self).__init__()
self.conv1 = SpatioTemporalConv(3, 64, [3, 7, 7], stride=[1, 2, 2], padding=[1, 3, 3])
self.conv2 = SpatioTemporalResLayer(64, 64, 3, layer_sizes[0], block_type=block_type)
self.conv3 = SpatioTemporalResLayer(64, 128, 3, layer_sizes[1], block_type=block_type, downsample=True)
self.conv4 = SpatioTemporalResLayer(128, 256, 3, layer_sizes[2], block_type=block_type, downsample=True)
self.conv5 = SpatioTemporalResLayer(256, 512, 3, layer_sizes[3], block_type=block_type, downsample=True)
self.pool = nn.AdaptiveAvgPool3d(1)
# define dropout layer in __init__
self.drop_layer = nn.Dropout(p = p)
def forward(self, x):
x = self.conv1(x)
x = self.drop_layer(x)
x = self.conv2(x)
x = self.drop_layer(x)
x = self.conv3(x)
x = self.drop_layer(x)
x = self.conv4(x)
x = self.drop_layer(x)
x = self.conv5(x)
x = self.drop_layer(x)
x = self.pool(x)
return x.view(-1, 512)
class R2Plus1DClassifier(nn.Module):
def __init__(self, num_classes, layer_sizes, block_type=SpatioTemporalResBlock, p = 0.2):
super(R2Plus1DClassifier, self).__init__()
self.res2plus1d = R2Plus1DNet(layer_sizes, block_type, p = p)
self.linear = nn.Linear(512, num_classes)
def forward(self, x):
x = self.res2plus1d(x)
x = self.linear(x)
return x
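# Hedged usage sketch (not from the original file): instantiating the (2+1)D
# classifier with ResNet-18-style layer sizes and checking the output shape.
# The class count, batch size and clip length below are illustrative assumptions.
def _r2plus1d_usage_example():
    model = R2Plus1DClassifier(num_classes=3, layer_sizes=[2, 2, 2, 2])
    clip = torch.rand(2, 3, 8, 112, 112)      # (batch, channels, frames, height, width)
    logits = model(clip)                      # -> tensor of shape (2, 3)
    return logits.shape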
class DataGenerator(torch.utils.data.Dataset):
def __init__(self, vids, labels, batch_size, flip = False, angle = 0, crop = 0, shift = 0):
self.vids = vids
self.labels = labels
self.indices = np.arange(vids.shape[0])
self.batch_size = batch_size
self.flip = flip
self.angle = angle
self.crop = crop
self.shift = shift
self.max_index = vids.shape[0] // batch_size
self.index = 0
np.random.shuffle(self.indices)
def __iter__(self):
return self
def random_zoom(self, batch, x, y):
ax = np.random.uniform(self.crop)
bx = np.random.uniform(ax)
ay = np.random.uniform(self.crop)
by = np.random.uniform(ay)
x = x*(1-ax/batch.shape[2]) + bx
y = y*(1-ay/batch.shape[3]) + by
return x, y
def random_rotate(self, batch, x, y):
rad = np.random.uniform(-self.angle, self.angle)/180*np.pi
rotm = np.array([[np.cos(rad), np.sin(rad)],
[-np.sin(rad), np.cos(rad)]])
x, y = np.einsum('ji, mni -> jmn', rotm, np.dstack([x, y]))
return x, y
def random_translate(self, batch, x, y):
xs = np.random.uniform(-self.shift, self.shift)
ys = np.random.uniform(-self.shift, self.shift)
return x + xs, y + ys
def horizontal_flip(self, batch):
return np.flip(batch, 3)
def __next__(self):
if self.index == self.max_index:
self.index = 0
np.random.shuffle(self.indices)
raise StopIteration
indices = self.indices[self.index * self.batch_size:(self.index + 1) * self.batch_size]
vids = np.array(self.vids[indices])
x, y = np.meshgrid(range(112), range(112))
x = x*24/112
y = y*24/112
if self.crop:
x, y = self.random_zoom(vids, x, y)
if self.angle:
x, y = self.random_rotate(vids, x, y)
if self.shift:
x, y = self.random_translate(vids, x, y)
if self.flip and np.random.random() < 0.5:
vids = self.horizontal_flip(vids)
        x = np.clip(x, 0, vids.shape[2]-1).astype(int)
        y = np.clip(y, 0, vids.shape[3]-1).astype(int)
vids = vids[:,:,x,y].transpose(0,1,3,2,4)
self.index += 1
out = torch.FloatTensor(vids.transpose(0,4,1,2,3))
return out, self.labels[indices]
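# Hedged usage sketch (not from the original file): one pass over the augmenting
# generator on fake data laid out as (N, T, 24, 24, C), matching the transposed
# .npy arrays loaded below. All sizes here are illustrative assumptions.
def _datagenerator_usage_example():
    fake_vids = np.random.rand(8, 27, 24, 24, 3).astype(np.float32)
    fake_labels = torch.randint(0, 3, (8,))
    gen = DataGenerator(fake_vids, fake_labels, batch_size=4, flip=True, angle=10, crop=4, shift=2)
    for clips, labels in gen:
        pass                                  # clips: float tensor of shape (4, 3, 27, 112, 112)
    return clips.shape, labels.shape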
def evaluate(data):
with torch.no_grad():
model.eval()
correct = 0
loss = 0
for imgs, labels in data:
output = model(imgs.to(device))
loss += criterion(output, labels.to(device)).detach().item() * imgs.size(0)
correct += (output.max(1).indices.cpu() == labels).sum().detach().item()
return correct, loss
def train(epoch):
model.train()
for i, (imgs, labels) in enumerate(train_data):
optimizer.zero_grad()
output = model(imgs.to(device))
loss = criterion(output, labels.to(device))
loss.backward()
optimizer.step()
if i % 32 == 0:
print(i, "/", train_data.max_index, sep="")
epochs = 20
batch_size = 4
learning_rate = 0.001
print("Dataset loading..", end = " ")
train_imgs = np.load("./cacophony-preprocessed/training.npy").transpose(0,1,3,4,2)
train_labels = torch.tensor(np.load("./cacophony-preprocessed/training-labels.npy"))
val_imgs = np.load("./cacophony-preprocessed/validation.npy").transpose(0,1,3,4,2)
val_labels = torch.tensor(np.load("./cacophony-preprocessed/validation-labels.npy"))
test_imgs = np.load("./cacophony-preprocessed/test.npy")
import pickle
#import matplotlib.pyplot as plt
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
def read_res(fname):
f=open(fname,'rb')
R=pickle.load(f)
val = R['val_acc']
test = R['test_acc']
best_idx=np.argmax(val)
#print("val",val)
#print('best epoch',best_idx*10)
ftest = test[best_idx]
return ftest
def read_max_res(fname):
f = open(fname, 'rb')
R = pickle.load(f)
val = R['val_acc']
test = R['test_acc']
return np.max(test)
def print_res(Fs,Cs,P):
num_c = len(Cs)
for c in Cs:
line_res = ''
avg = 0.0
valid_fold = 0
# print('c',c)
for f in Fs:
# print('F ',f)
k = str(f) + ':' + str(c)
#print('K', k)
if k in P:
line_res += '%.4f' % P[k] + ' &'
avg += P[k]
valid_fold += 1
else:
line_res += ' &'
if valid_fold > 0:
#print('A', avg, valid_fold)
a = avg / float(valid_fold)
line_res += '%.4f' % a + "\\\\ \n"
else:
line_res += " \\\\\n"
print('C' + str(c)+'&', line_res)
def plot_progress(folder_resname,configid=5):
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
#ax1.plot(x, y)
#ax1.set_title('Sharing x per column, y per row')
#ax2.scatter(x, y)
#ax3.scatter(x, 2 * y ** 2 - 1, color='r')
#ax4.plot(x, 2 * y ** 2 - 1, color='r')
for fold_id,ax in zip([1,2,3,4],[ax1,ax2,ax3,ax4]):
ax.set_ylim([0.6, 1.05])
fname=os.path.join(folder_resname,'table_F'+str(fold_id)+'_C'+str(configid)+'.pickle')
try:
f = open(fname, 'rb')
R = pickle.load(f)
except IOError:
print('File not found',fname)
continue
        train = np.array(R['train_acc'])
#################################################################
# Code written by <NAME> (<EMAIL>)
# For bug report, please contact author using the email address
#################################################################
import sys, random
import numpy as np
import pickle as pickle
from collections import OrderedDict
import argparse
import theano
import theano.tensor as T
from theano import config
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def unzip(zipped):
new_params = OrderedDict()
for k, v in zipped.items():
new_params[k] = v.get_value()
return new_params
def init_params(options):
params = OrderedDict()
numXcodes = options['numXcodes']
numYcodes = options['numYcodes']
embDimSize= options['embDimSize']
demoSize = options['demoSize']
hiddenDimSize = options['hiddenDimSize']
params['W_emb'] = np.random.uniform(-0.01, 0.01, (numXcodes, embDimSize)).astype(config.floatX) #emb matrix needs an extra dimension for the time
params['b_emb'] = np.zeros(embDimSize).astype(config.floatX)
    params['W_hidden'] = np.random.uniform(-0.01, 0.01, (embDimSize+demoSize, hiddenDimSize)).astype(config.floatX)
params['b_hidden'] = np.zeros(hiddenDimSize).astype(config.floatX)
if numYcodes > 0:
        params['W_output'] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, numYcodes)).astype(config.floatX)
params['b_output'] = np.zeros(numYcodes).astype(config.floatX)
else:
        params['W_output'] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, numXcodes)).astype(config.floatX)
params['b_output'] = np.zeros(numXcodes).astype(config.floatX)
return params
def load_params(options):
params = np.load(options['modelFile'])
return params
def init_tparams(params):
tparams = OrderedDict()
for k, v in params.items():
tparams[k] = theano.shared(v, name=k)
return tparams
def build_model(tparams, options):
x = T.matrix('x', dtype=config.floatX)
d = T.matrix('d', dtype=config.floatX)
y = T.matrix('y', dtype=config.floatX)
mask = T.vector('mask', dtype=config.floatX)
logEps = options['logEps']
emb = T.maximum(T.dot(x, tparams['W_emb']) + tparams['b_emb'],0)
if options['demoSize'] > 0: emb = T.concatenate((emb, d), axis=1)
visit = T.maximum(T.dot(emb, tparams['W_hidden']) + tparams['b_hidden'],0)
results = T.nnet.softmax(T.dot(visit, tparams['W_output']) + tparams['b_output'])
mask1 = (mask[:-1] * mask[1:])[:,None]
mask2 = (mask[:-2] * mask[1:-1] * mask[2:])[:,None]
mask3 = (mask[:-3] * mask[1:-2] * mask[2:-1] * mask[3:])[:,None]
mask4 = (mask[:-4] * mask[1:-3] * mask[2:-2] * mask[3:-1] * mask[4:])[:,None]
mask5 = (mask[:-5] * mask[1:-4] * mask[2:-3] * mask[3:-2] * mask[4:-1] * mask[5:])[:,None]
t = None
if options['numYcodes'] > 0: t = y
else: t = x
forward_results = results[:-1] * mask1
forward_cross_entropy = -(t[1:] * T.log(forward_results + logEps) + (1. - t[1:]) * T.log(1. - forward_results + logEps))
forward_results2 = results[:-2] * mask2
forward_cross_entropy2 = -(t[2:] * T.log(forward_results2 + logEps) + (1. - t[2:]) * T.log(1. - forward_results2 + logEps))
forward_results3 = results[:-3] * mask3
forward_cross_entropy3 = -(t[3:] * T.log(forward_results3 + logEps) + (1. - t[3:]) * T.log(1. - forward_results3 + logEps))
forward_results4 = results[:-4] * mask4
forward_cross_entropy4 = -(t[4:] * T.log(forward_results4 + logEps) + (1. - t[4:]) * T.log(1. - forward_results4 + logEps))
forward_results5 = results[:-5] * mask5
forward_cross_entropy5 = -(t[5:] * T.log(forward_results5 + logEps) + (1. - t[5:]) * T.log(1. - forward_results5 + logEps))
backward_results = results[1:] * mask1
backward_cross_entropy = -(t[:-1] * T.log(backward_results + logEps) + (1. - t[:-1]) * T.log(1. - backward_results + logEps))
backward_results2 = results[2:] * mask2
backward_cross_entropy2 = -(t[:-2] * T.log(backward_results2 + logEps) + (1. - t[:-2]) * T.log(1. - backward_results2 + logEps))
backward_results3 = results[3:] * mask3
backward_cross_entropy3 = -(t[:-3] * T.log(backward_results3 + logEps) + (1. - t[:-3]) * T.log(1. - backward_results3 + logEps))
backward_results4 = results[4:] * mask4
backward_cross_entropy4 = -(t[:-4] * T.log(backward_results4 + logEps) + (1. - t[:-4]) * T.log(1. - backward_results4 + logEps))
backward_results5 = results[5:] * mask5
backward_cross_entropy5 = -(t[:-5] * T.log(backward_results5 + logEps) + (1. - t[:-5]) * T.log(1. - backward_results5 + logEps))
visit_cost1 = (forward_cross_entropy.sum(axis=1).sum(axis=0) + backward_cross_entropy.sum(axis=1).sum(axis=0)) / (mask1.sum() + logEps)
visit_cost2 = (forward_cross_entropy2.sum(axis=1).sum(axis=0) + backward_cross_entropy2.sum(axis=1).sum(axis=0)) / (mask2.sum() + logEps)
visit_cost3 = (forward_cross_entropy3.sum(axis=1).sum(axis=0) + backward_cross_entropy3.sum(axis=1).sum(axis=0)) / (mask3.sum() + logEps)
visit_cost4 = (forward_cross_entropy4.sum(axis=1).sum(axis=0) + backward_cross_entropy4.sum(axis=1).sum(axis=0)) / (mask4.sum() + logEps)
visit_cost5 = (forward_cross_entropy5.sum(axis=1).sum(axis=0) + backward_cross_entropy5.sum(axis=1).sum(axis=0)) / (mask5.sum() + logEps)
windowSize = options['windowSize']
visit_cost = visit_cost1
if windowSize == 2:
visit_cost = visit_cost1 + visit_cost2
elif windowSize == 3:
visit_cost = visit_cost1 + visit_cost2 + visit_cost3
elif windowSize == 4:
visit_cost = visit_cost1 + visit_cost2 + visit_cost3 + visit_cost4
elif windowSize == 5:
visit_cost = visit_cost1 + visit_cost2 + visit_cost3 + visit_cost4 + visit_cost5
iVector = T.vector('iVector', dtype='int32')
jVector = T.vector('jVector', dtype='int32')
preVec = T.maximum(tparams['W_emb'],0)
norms = (T.exp(T.dot(preVec, preVec.T))).sum(axis=1)
emb_cost = -T.log((T.exp((preVec[iVector] * preVec[jVector]).sum(axis=1)) / norms[iVector]) + logEps)
total_cost = visit_cost + T.mean(emb_cost) + options['L2_reg'] * (tparams['W_emb'] ** 2).sum()
if options['demoSize'] > 0 and options['numYcodes'] > 0: return x, d, y, mask, iVector, jVector, total_cost
elif options['demoSize'] == 0 and options['numYcodes'] > 0: return x, y, mask, iVector, jVector, total_cost
elif options['demoSize'] > 0 and options['numYcodes'] == 0: return x, d, mask, iVector, jVector, total_cost
else: return x, mask, iVector, jVector, total_cost
def adadelta(tparams, grads, x, mask, iVector, jVector, cost, options, d=None, y=None):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_grad' % k) for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rup2' % k) for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k) for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
if options['demoSize'] > 0 and options['numYcodes'] > 0:
f_grad_shared = theano.function([x, d, y, mask, iVector, jVector], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
elif options['demoSize'] == 0 and options['numYcodes'] > 0:
f_grad_shared = theano.function([x, y, mask, iVector, jVector], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
elif options['demoSize'] > 0 and options['numYcodes'] == 0:
f_grad_shared = theano.function([x, d, mask, iVector, jVector], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
else:
f_grad_shared = theano.function([x, mask, iVector, jVector], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(list(tparams.values()), updir)]
f_update = theano.function([], [], updates=ru2up + param_up, on_unused_input='ignore', name='adadelta_f_update')
return f_grad_shared, f_update
def load_data(xFile, dFile, yFile):
seqX = np.array(pickle.load(open(xFile, 'rb')))
seqD = []
if len(dFile) > 0: seqD = np.asarray(pickle.load(open(dFile, 'rb')), dtype=config.floatX)
seqY = []
if len(yFile) > 0: seqY = np.array(pickle.load(open(yFile, 'rb')))
return seqX, seqD, seqY
def pickTwo(codes, iVector, jVector):
for first in codes:
for second in codes:
if first == second: continue
iVector.append(first)
jVector.append(second)
def padMatrix(seqs, labels, options):
n_samples = len(seqs)
iVector = []
jVector = []
numXcodes = options['numXcodes']
numYcodes = options['numYcodes']
if numYcodes > 0:
x = np.zeros((n_samples, numXcodes)).astype(config.floatX)
y = np.zeros((n_samples, numYcodes)).astype(config.floatX)
mask = np.zeros((n_samples,)).astype(config.floatX)
for idx, (seq, label) in enumerate(zip(seqs, labels)):
if not seq[0] == -1:
x[idx][seq] = 1.
y[idx][label] = 1.
pickTwo(seq, iVector, jVector)
mask[idx] = 1.
return x, y, mask, iVector, jVector
else:
x = np.zeros((n_samples, numXcodes)).astype(config.floatX)
mask = np.zeros((n_samples,)).astype(config.floatX)
for idx, seq in enumerate(seqs):
if not seq[0] == -1:
x[idx][seq] = 1.
pickTwo(seq, iVector, jVector)
mask[idx] = 1.
return x, mask, iVector, jVector
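# Worked example (illustrative, not from the original code): for seqs = [[2, 5], [-1], [7]]
# with numYcodes == 0 and numXcodes >= 8, padMatrix returns a 3 x numXcodes multi-hot
# matrix whose row 0 marks codes 2 and 5, row 1 is all zeros (the [-1] patient separator),
# and row 2 marks code 7; mask = [1, 0, 1]; pickTwo appends the ordered pairs (2,5) and
# (5,2) to iVector/jVector for the intra-visit code co-occurrence loss.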
def train_med2vec(seqFile='seqFile.txt',
demoFile='demoFile.txt',
labelFile='labelFile.txt',
outFile='outFile.txt',
modelFile='modelFile.txt',
L2_reg=0.001,
numXcodes=20000,
numYcodes=20000,
embDimSize=1000,
hiddenDimSize=2000,
batchSize=100,
demoSize=2,
logEps=1e-8,
windowSize=1,
verbose=False,
maxEpochs=1000):
options = locals().copy()
print('initializing parameters')
params = init_params(options)
#params = load_params(options)
tparams = init_tparams(params)
print('building models')
f_grad_shared = None
f_update = None
if demoSize > 0 and numYcodes > 0:
x, d, y, mask, iVector, jVector, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=list(tparams.values()))
f_grad_shared, f_update = adadelta(tparams, grads, x, mask, iVector, jVector, cost, options, d=d, y=y)
elif demoSize == 0 and numYcodes > 0:
x, y, mask, iVector, jVector, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=list(tparams.values()))
f_grad_shared, f_update = adadelta(tparams, grads, x, mask, iVector, jVector, cost, options, y=y)
elif demoSize > 0 and numYcodes == 0:
x, d, mask, iVector, jVector, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=list(tparams.values()))
f_grad_shared, f_update = adadelta(tparams, grads, x, mask, iVector, jVector, cost, options, d=d)
else:
x, mask, iVector, jVector, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=list(tparams.values()))
f_grad_shared, f_update = adadelta(tparams, grads, x, mask, iVector, jVector, cost, options)
print('loading data')
seqs, demos, labels = load_data(seqFile, demoFile, labelFile)
n_batches = int(np.ceil(float(len(seqs)) / float(batchSize)))
print('training start')
for epoch in range(maxEpochs):
iteration = 0
costVector = []
for index in random.sample(list(range(n_batches)), n_batches):
batchX = seqs[batchSize*index:batchSize*(index+1)]
batchY = []
batchD = []
if demoSize > 0 and numYcodes > 0:
batchY = labels[batchSize*index:batchSize*(index+1)]
x, y, mask, iVector, jVector = padMatrix(batchX, batchY, options)
batchD = demos[batchSize*index:batchSize*(index+1)]
cost = f_grad_shared(x, batchD, y, mask, iVector, jVector)
elif demoSize == 0 and numYcodes > 0:
batchY = labels[batchSize*index:batchSize*(index+1)]
x, y, mask, iVector, jVector = padMatrix(batchX, batchY, options)
cost = f_grad_shared(x, y, mask, iVector, jVector)
elif demoSize > 0 and numYcodes == 0:
x, mask, iVector, jVector = padMatrix(batchX, batchY, options)
batchD = demos[batchSize*index:batchSize*(index+1)]
cost = f_grad_shared(x, batchD, mask, iVector, jVector)
else:
x, mask, iVector, jVector = padMatrix(batchX, batchY, options)
cost = f_grad_shared(x, mask, iVector, jVector)
costVector.append(cost)
f_update()
if (iteration % 10 == 0) and verbose: print('epoch:%d, iteration:%d/%d, cost:%f' % (epoch, iteration, n_batches, cost))
iteration += 1
        print('epoch:%d, mean_cost:%f' % (epoch, np.mean(costVector)))
import numpy as np
import random
import binascii
def getCodewords(inputchar):
# Generator Matrix
G = np.array([[1,0,0,0,1,1,1],
[0,1,0,0,1,0,1],
[0,0,1,0,1,1,0],
[0,0,0,1,0,1,1]])
# Message
binary_m = format(ord(inputchar), 'b')
if len(binary_m) < 8:
pad = '0'
binary_m = (8 - len(binary_m)) * pad + binary_m
m = np.array([[0,0,0,0,0,0,0,0]])
print('\n{}'.format(binary_m))
for i in range(0,8):
        m[0][i] = int(binary_m[i])
M = np.array([[0,0,0,0],[0,0,0,0]])
    M[0] = np.array(m[0][0:4])
from collections import deque
import numpy as np
class KFoldCrossValidation(object):
def __init__(self, folds_n=3):
self._folds_n = folds_n
        self._scores = np.zeros(self._folds_n)
import unittest
from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack
from cantera import Solution, one_atm, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def validate_on_mechanism(mech, temperature, pressure, test_rhs=True, test_jac=True):
xml = join(test_mech_directory, mech + '.xml')
r = ChemicalMechanismSpec(xml, 'gas').griffon
gas = Solution(xml)
ns = gas.n_species
T = temperature
p = pressure
gas.TPX = T, p, ones(ns)
y = gas.Y
rho = gas.density_mass
state = hstack((rho, T, y[:-1]))
rhsGR = np.empty(ns + 1)
rhsGRTemporary = np.empty(ns + 1)
jacGR = np.empty((ns + 1) * (ns + 1))
r.reactor_rhs_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, rhsGR)
r.reactor_jac_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, 0, rhsGRTemporary, jacGR)
jacGR = jacGR.reshape((ns + 1, ns + 1), order='F')
def cantera_rhs(rho_arg, T_arg, Y_arg):
gas.TDY = T_arg, rho_arg, Y_arg
w = gas.net_production_rates * gas.molecular_weights
e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
cv = gas.cv_mass
rhs = zeros(ns + 1)
rhs[0] = 0.
rhs[1] = - sum(w * e) / (rho_arg * cv)
        rhs[2:] = w[:-1] / rho_arg
return rhs
rhsCN = cantera_rhs(rho, T, y)
if test_rhs:
pass_rhs = max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)
if test_jac:
jacFD = zeros((ns + 1, ns + 1))
wm1 = zeros(ns + 1)
wp1 = zeros(ns + 1)
drho = 1.e-4
dT = 1.e-2
dY = 1.e-6
state_m = hstack((rho - drho, T, y[:-1]))
state_p = hstack((rho + drho, T, y[:-1]))
r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
jacFD[:, 0] = (- wm1 + wp1) / (2. * drho)
state_m = hstack((rho, T - dT, y[:-1]))
        state_p = hstack((rho, T + dT, y[:-1]))
import pytest
import numpy as np
import scipy as sp
import pandas as pd
import scdrs
from .test_method_score_cell_main import load_toy_data
def test_select_ctrl_geneset():
"""
Test scdrs.method._select_ctrl_geneset
"""
np.random.seed(0)
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
df_gene = pd.DataFrame(
index=adata.var_names[:100], columns=["gene", "categorical", "continuous"]
)
df_gene["gene"] = df_gene.index
df_gene["categorical"] = [1] * 50 + [2] * 50
df_gene["continuous"] = np.random.rand(100)
gene_list = list(df_gene.index[[0, 55, 27, 80, 2]])
gene_weight = [1.1, 2.5, 3.8, 4.1, 5.2]
for ctrl_match_key in ["categorical", "continuous"]:
dic_ctrl_list, dic_ctrl_weight = scdrs.method._select_ctrl_geneset(
df_gene, gene_list, gene_weight, ctrl_match_key, 1, 10, 0
)
ctrl_gene_list_sort = np.array(dic_ctrl_list[0])[np.argsort(dic_ctrl_weight[0])]
ctrl_weight_sort = np.array(dic_ctrl_weight[0])[np.argsort(dic_ctrl_weight[0])]
err_msg = "ctrl_match_key={}\n".format(ctrl_match_key)
err_msg += "|{:^15}|{:^15}|{:^15}|{:^15}|{:^15}|{:^15}|\n".format(
"GENE", ctrl_match_key, "WEIGHT", "CTRL_GENE", ctrl_match_key, "WEIGHT"
)
for i in range(len(gene_list)):
err_msg += (
"|{:^15}|{:^15.3f}|{:^15.3f}|{:^15}|{:^15.3f}|{:^15.3f}|\n".format(
gene_list[i],
df_gene.loc[gene_list[i], ctrl_match_key],
gene_weight[i],
ctrl_gene_list_sort[i],
df_gene.loc[ctrl_gene_list_sort[i], ctrl_match_key],
ctrl_weight_sort[i],
)
)
assert (
np.allclose(
df_gene.loc[gene_list, ctrl_match_key],
df_gene.loc[ctrl_gene_list_sort, ctrl_match_key],
rtol=0,
atol=0.1,
)
& np.allclose(gene_weight, ctrl_weight_sort)
), err_msg
return
def test_compute_raw_score_dense_nocov_vs():
"""
Test scdrs.method._compute_raw_score: dense+nocov+vs
"""
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
adata = adata[:, :100].copy()
gene_list = list(adata.var_names[[0, 55, 27, 80, 2]])
adata.X = adata.X.toarray()
scdrs.pp.preprocess(adata, cov=None)
v_raw_score, v_score_weight = scdrs.method._compute_raw_score(
adata, gene_list, np.ones(len(gene_list)), "vs"
)
v_score_weight_true = 1 / np.sqrt(
adata.uns["SCDRS_PARAM"]["GENE_STATS"].loc[gene_list, "var_tech"].values + 1e-2
)
v_score_weight_true = v_score_weight_true / v_score_weight_true.sum()
v_raw_score_true = adata[:, gene_list].X.dot(v_score_weight_true)
err_msg = "Dense+nocov+vs: avg_abs_score_dif=%0.2e, avg_abs_weight_dif=%0.2e" % (
np.absolute(v_raw_score - v_raw_score_true).mean(),
np.absolute(v_score_weight - v_score_weight_true).mean(),
)
assert np.allclose(v_raw_score, v_raw_score_true) & np.allclose(
v_score_weight, v_score_weight_true
), err_msg
return
def test_compute_raw_score_dense_nocov_vs_weight():
"""
Test scdrs.method._compute_raw_score: dense+nocov+vs+weight
"""
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
adata = adata[:, :100].copy()
gene_list = list(adata.var_names[[0, 55, 27, 80, 2]])
gene_weight = [1.1, 2.3, 1.8, 0.2, 3]
adata.X = adata.X.toarray()
scdrs.pp.preprocess(adata, cov=None)
v_raw_score, v_score_weight = scdrs.method._compute_raw_score(
adata, gene_list, gene_weight, "vs"
)
v_score_weight_true = 1 / np.sqrt(
adata.uns["SCDRS_PARAM"]["GENE_STATS"].loc[gene_list, "var_tech"].values + 1e-2
)
v_score_weight_true = v_score_weight_true * np.array(gene_weight)
v_score_weight_true = v_score_weight_true / v_score_weight_true.sum()
v_raw_score_true = adata[:, gene_list].X.dot(v_score_weight_true)
err_msg = (
"Dense+nocov+vs+weight: avg_abs_score_dif=%0.2e, avg_abs_weight_dif=%0.2e"
% (
np.absolute(v_raw_score - v_raw_score_true).mean(),
np.absolute(v_score_weight - v_score_weight_true).mean(),
)
)
assert np.allclose(v_raw_score, v_raw_score_true) & np.allclose(
v_score_weight, v_score_weight_true
), err_msg
return
def test_compute_raw_score_sparse_nocov_vs():
"""
Test scdrs.method._compute_raw_score: sparse+nocov+vs
"""
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
adata = adata[:, :100].copy()
gene_list = list(adata.var_names[[0, 55, 27, 80, 2]])
scdrs.pp.preprocess(adata, cov=None)
v_raw_score, v_score_weight = scdrs.method._compute_raw_score(
adata, gene_list, np.ones(len(gene_list)), "vs"
)
v_score_weight_true = 1 / np.sqrt(
adata.uns["SCDRS_PARAM"]["GENE_STATS"].loc[gene_list, "var_tech"].values + 1e-2
)
v_score_weight_true = v_score_weight_true / v_score_weight_true.sum()
v_raw_score_true = adata[:, gene_list].X.dot(v_score_weight_true)
err_msg = "Sparse+nocov+vs: avg_abs_score_dif=%0.2e, avg_abs_weight_dif=%0.2e" % (
np.absolute(v_raw_score - v_raw_score_true).mean(),
np.absolute(v_score_weight - v_score_weight_true).mean(),
)
assert np.allclose(v_raw_score, v_raw_score_true) & np.allclose(
v_score_weight, v_score_weight_true
), err_msg
return
def test_compute_raw_score_sparse_cov_vs():
"""
Test scdrs.method._compute_raw_score: sparse+nocov+vs
"""
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
adata = adata[:, :100].copy()
gene_list = list(adata.var_names[[0, 55, 27, 80, 2]])
scdrs.pp.preprocess(adata, cov=df_cov)
v_raw_score, v_score_weight = scdrs.method._compute_raw_score(
adata, gene_list, np.ones(len(gene_list)), "vs"
)
# adata.X + COV_MAT * COV_BETA + COV_GENE_MEAN
mat_X = adata[:, gene_list].X.toarray()
mat_X = mat_X + adata.uns["SCDRS_PARAM"]["COV_MAT"].values.dot(
adata.uns["SCDRS_PARAM"]["COV_BETA"].loc[gene_list].values.T
)
mat_X = mat_X + adata.uns["SCDRS_PARAM"]["COV_GENE_MEAN"].loc[gene_list].values
v_score_weight_true = 1 / np.sqrt(
adata.uns["SCDRS_PARAM"]["GENE_STATS"].loc[gene_list, "var_tech"].values + 1e-2
)
v_score_weight_true = v_score_weight_true / v_score_weight_true.sum()
v_raw_score_true = mat_X.dot(v_score_weight_true)
err_msg = "Sparse+cov+vs: avg_abs_score_dif=%0.2e, avg_abs_weight_dif=%0.2e" % (
np.absolute(v_raw_score - v_raw_score_true).mean(),
np.absolute(v_score_weight - v_score_weight_true).mean(),
)
assert np.allclose(v_raw_score, v_raw_score_true) & np.allclose(
v_score_weight, v_score_weight_true
), err_msg
return
def test_compute_raw_score_sparse_cov_vs_weight():
"""
Test scdrs.method._compute_raw_score: sparse+nocov+vs+weight
"""
adata, df_cov, df_gs, dic_res_ref = load_toy_data()
adata = adata[:, :100].copy()
gene_list = list(adata.var_names[[0, 55, 27, 80, 2]])
gene_weight = [1.1, 2.3, 1.8, 0.2, 3]
scdrs.pp.preprocess(adata, cov=df_cov)
v_raw_score, v_score_weight = scdrs.method._compute_raw_score(
adata, gene_list, gene_weight, "vs"
)
# adata.X + COV_MAT * COV_BETA + COV_GENE_MEAN
mat_X = adata[:, gene_list].X.toarray()
mat_X = mat_X + adata.uns["SCDRS_PARAM"]["COV_MAT"].values.dot(
adata.uns["SCDRS_PARAM"]["COV_BETA"].loc[gene_list].values.T
)
mat_X = mat_X + adata.uns["SCDRS_PARAM"]["COV_GENE_MEAN"].loc[gene_list].values
v_score_weight_true = 1 / np.sqrt(
adata.uns["SCDRS_PARAM"]["GENE_STATS"].loc[gene_list, "var_tech"].values + 1e-2
)
    v_score_weight_true = v_score_weight_true * np.array(gene_weight)
from gym.spaces import Discrete, Box, MultiDiscrete
from gym_electric_motor.physical_systems import SynchronousMotorSystem, DcMotorSystem, DcSeriesMotor, \
DcExternallyExcitedMotor
from gym_electric_motor.reference_generators import MultipleReferenceGenerator, SwitchedReferenceGenerator
from gym_electric_motor.visualization import MotorDashboard
from gym_electric_motor import envs
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
class Controller:
"""This is the base class for every controller along with the motor environments."""
@classmethod
def make(cls, environment, stages=None, **controller_kwargs):
"""
This function creates the controller structure and optionally tunes the controller.
Args:
environment: gym-electric-motor environment to be controlled
            stages: stages of the controller; if no stages are passed, the controller is automatically designed and tuned
**controller_kwargs: setting parameters for the controller and visualization
Returns:
fully designed controller for the control of the gym-electric-motor environment, which is called using the
control function
the inputs of the control function are the state and the reference, both are given by the environment
"""
controller_kwargs = cls.reference_states(environment, **controller_kwargs)
visualization, controller_kwargs = cls.get_visualization(environment, **controller_kwargs)
if stages is not None:
controller_type, stages = cls.find_controller_type(environment, stages, **controller_kwargs)
assert controller_type in _controllers.keys(), f'Controller {controller_type} unknown'
stages = cls.automated_gain(environment, stages, controller_type, **controller_kwargs)
controller = _controllers[controller_type][0](environment, stages, **controller_kwargs)
else:
controller_type, stages = cls.automated_controller_design(environment, **controller_kwargs)
stages = cls.automated_gain(environment, stages, controller_type, **controller_kwargs)
controller = _controllers[controller_type][0](environment, stages, **controller_kwargs)
controller.visualization = visualization
return controller
def control(self, state, reference):
"""
Calculation of the next control action for the environment based on the last environments states and references.
This is the main method of the controller class which is called every control cycle.
Args:
state: motor states given by the gym-electric-motor environment
reference: reference for the referenced states given by the gym-electric-motor environment
Returns:
control actions
"""
pass
def get_ref(self):
"""Function to pass the calculated reference values to the visualization."""
pass
def reset(self):
"""
        Method to reset the controller's internal states (e.g. integrators) to initial values before each control episode
of the environment starts.
"""
pass
def plot(self, external_reference_plots, state_names):
"""
        This method passes the latest internally generated references of the controller to the ExternalReferencePlots. The
        GEM-Environment uses this data to plot these references together with the corresponding states within its MotorDashboard.
Args:
external_reference_plots(Iterable[ExternalReferencedPlot]):
The External Reference Plots that the internal reference data shall be passed to.
state_names:
The list of all environment state names.
"""
if self.visualization:
external_refs = self.get_ref()
external_ref_plots = list(external_reference_plots)
ref_state_idxs = external_refs['ref_state']
plot_state_idxs = [
list(state_names).index(external_ref_plot.state) for external_ref_plot in external_reference_plots
]
ref_values = external_refs['ref_value']
for ref_state_idx, ref_value in zip(ref_state_idxs, ref_values):
try:
plot_idx = plot_state_idxs.index(ref_state_idx)
except ValueError:
pass
else:
external_ref_plots[plot_idx].external_reference(ref_value)
@staticmethod
def get_visualization(environment, **controller_kwargs):
for visualization in environment._visualizations:
if isinstance(visualization, MotorDashboard):
controller_kwargs['update_interval'] = visualization._update_interval
return True, controller_kwargs
return False, controller_kwargs
@staticmethod
def reference_states(environment, **controller_kwargs):
"""This method searches the environment for all referenced states and writes them to an array."""
ref_states = []
if isinstance(environment.reference_generator, MultipleReferenceGenerator):
for rg in environment.reference_generator._sub_generators:
if isinstance(rg, SwitchedReferenceGenerator):
ref_states.append(rg._sub_generators[0]._reference_state)
else:
ref_states.append(rg._reference_state)
elif isinstance(environment.reference_generator, SwitchedReferenceGenerator):
ref_states.append(environment.reference_generator._sub_generators[0]._reference_state)
else:
ref_states.append(environment.reference_generator._reference_state)
controller_kwargs['ref_states'] = np.array(ref_states)
return controller_kwargs
@staticmethod
def find_controller_type(environment, stages, **controller_kwargs):
_stages = stages
if isinstance(environment.physical_system, DcMotorSystem):
if type(stages) is list:
if len(stages) > 1:
if type(stages[0]) is list:
stages = stages[0]
if len(stages) > 1:
controller_type = 'cascaded_controller'
else:
controller_type = stages[0]['controller_type']
else:
controller_type = stages[0]['controller_type']
else:
if type(stages) is dict:
controller_type = stages['controller_type']
_stages = [stages]
else:
controller_type = stages
_stages = [{'controller_type': stages}]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
if len(stages) == 2:
if len(stages[1]) == 1 and 'i_sq' in controller_kwargs['ref_states']:
controller_type = 'foc_controller'
else:
controller_type = 'cascaded_foc_controller'
else:
controller_type = 'cascaded_foc_controller'
return controller_type, _stages
@staticmethod
def automated_controller_design(environment, **controller_kwargs):
"""This method automatically designs the controller based on the given motor environment and control task."""
action_space_type = type(environment.action_space)
ref_states = controller_kwargs['ref_states']
stages = []
if isinstance(environment.physical_system, DcMotorSystem): # Checking type of motor
if 'omega' in ref_states or 'torque' in ref_states: # Checking control task
controller_type = 'cascaded_controller'
for i in range(len(stages), 2):
if i == 0:
if action_space_type is Box: # Checking type of output stage (finite / cont)
stages.append({'controller_type': 'pi_controller'})
else:
stages.append({'controller_type': 'three_point'})
else:
stages.append({'controller_type': 'pi_controller'}) # Adding PI-Controller for overlaid stages
elif 'i' in ref_states or 'i_a' in ref_states:
# Checking type of output stage (finite / cont)
if action_space_type is Discrete or action_space_type is MultiDiscrete:
stages.append({'controller_type': 'three_point'})
elif action_space_type is Box:
stages.append({'controller_type': 'pi_controller'})
controller_type = stages[0]['controller_type']
# Add stage for i_e current of the ExtExDC
if isinstance(environment.physical_system.electrical_motor, DcExternallyExcitedMotor):
if action_space_type is Box:
stages = [stages, [{'controller_type': 'pi_controller'}]]
else:
stages = [stages, [{'controller_type': 'three_point'}]]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
if 'i_sq' in ref_states or 'torque' in ref_states: # Checking control task
controller_type = 'foc_controller' if 'i_sq' in ref_states else 'cascaded_foc_controller'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}]]
else:
stages = [[{'controller_type': 'pi_controller'}, {'controller_type': 'pi_controller'}]]
elif 'omega' in ref_states:
controller_type = 'cascaded_foc_controller'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}], [{'controller_type': 'pi_controller'}]]
else:
stages = [[{'controller_type': 'pi_controller'},
{'controller_type': 'pi_controller'}], [{'controller_type': 'pi_controller'}]]
else:
controller_type = 'foc_controller'
return controller_type, stages
@staticmethod
def automated_gain(environment, stages, controller_type, **controller_kwargs):
"""
This method automatically parameterizes a given controller design if the parameter automated_gain is True
(default True), based on the design according to the symmetric optimum (SO). Further information about the
design according to the SO can be found in the following paper (https://ieeexplore.ieee.org/document/55967).
"""
ref_states = controller_kwargs['ref_states']
mp = environment.physical_system.electrical_motor.motor_parameter
limits = environment.physical_system.limits
omega_lim = limits[environment.state_names.index('omega')]
if isinstance(environment.physical_system, DcMotorSystem):
i_a_lim = limits[environment.physical_system.CURRENTS_IDX[0]]
i_e_lim = limits[environment.physical_system.CURRENTS_IDX[-1]]
u_a_lim = limits[environment.physical_system.VOLTAGES_IDX[0]]
u_e_lim = limits[environment.physical_system.VOLTAGES_IDX[-1]]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
i_sd_lim = limits[environment.state_names.index('i_sd')]
i_sq_lim = limits[environment.state_names.index('i_sq')]
u_sd_lim = limits[environment.state_names.index('u_sd')]
u_sq_lim = limits[environment.state_names.index('u_sq')]
torque_lim = limits[environment.state_names.index('torque')]
# The parameter a is a design parameter when designing a controller according to the SO
a = controller_kwargs.get('a', 4)
automated_gain = controller_kwargs.get('automated_gain', True)
if isinstance(environment.physical_system.electrical_motor, DcSeriesMotor):
mp['l'] = mp['l_a'] + mp['l_e']
elif isinstance(environment.physical_system, DcMotorSystem):
mp['l'] = mp['l_a']
if 'automated_gain' not in controller_kwargs.keys() or automated_gain:
cont_extex_envs = (
envs.ContSpeedControlDcExternallyExcitedMotorEnv,
envs.ContCurrentControlDcExternallyExcitedMotorEnv,
envs.ContTorqueControlDcExternallyExcitedMotorEnv
)
finite_extex_envs = (
envs.FiniteTorqueControlDcExternallyExcitedMotorEnv,
envs.FiniteSpeedControlDcExternallyExcitedMotorEnv,
envs.FiniteCurrentControlDcExternallyExcitedMotorEnv
)
if type(environment) in cont_extex_envs:
stages_a = stages[0]
stages_e = stages[1]
p_gain = mp['l_e'] / (environment.physical_system.tau * a) / u_e_lim * i_e_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
stages_e[0]['p_gain'] = stages_e[0].get('p_gain', p_gain)
stages_e[0]['i_gain'] = stages_e[0].get('i_gain', i_gain)
if stages_e[0]['controller_type'] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_e[0]['d_gain'] = stages_e[0].get('d_gain', d_gain)
elif type(environment) in finite_extex_envs:
stages_a = stages[0]
stages_e = stages[1]
else:
stages_a = stages
stages_e = False
if _controllers[controller_type][0] == ContinuousActionController:
if 'i' in ref_states or 'i_a' in ref_states or 'torque' in ref_states:
p_gain = mp['l'] / (environment.physical_system.tau * a) / u_a_lim * i_a_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
stages_a[0]['p_gain'] = stages_a[0].get('p_gain', p_gain)
stages_a[0]['i_gain'] = stages_a[0].get('i_gain', i_gain)
if _controllers[controller_type][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[0]['d_gain'] = stages_a[0].get('d_gain', d_gain)
elif 'omega' in ref_states:
p_gain = environment.physical_system.mechanical_load.j_total * mp['r_a'] ** 2 / (
a * mp['l']) / u_a_lim * omega_lim
i_gain = p_gain / (a * mp['l'])
stages_a[0]['p_gain'] = stages_a[0].get('p_gain', p_gain)
stages_a[0]['i_gain'] = stages_a[0].get('i_gain', i_gain)
if _controllers[controller_type][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[0]['d_gain'] = stages_a[0].get('d_gain', d_gain)
elif _controllers[controller_type][0] == CascadedController:
for i in range(len(stages)):
if _controllers[stages_a[i]['controller_type']][1] == ContinuousController:
if i == 0:
p_gain = mp['l'] / (environment.physical_system.tau * a) / u_a_lim * i_a_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
if _controllers[stages_a[i]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[i]['d_gain'] = stages_a[i].get('d_gain', d_gain)
elif i == 1:
t_n = environment.physical_system.tau * a ** 2
p_gain = environment.physical_system.mechanical_load.j_total / (
a * t_n) / i_a_lim * omega_lim
i_gain = p_gain / (a * t_n)
if _controllers[stages_a[i]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[i]['d_gain'] = stages_a[i].get('d_gain', d_gain)
stages_a[i]['p_gain'] = stages_a[i].get('p_gain', p_gain)
stages_a[i]['i_gain'] = stages_a[i].get('i_gain', i_gain)
stages = stages_a if not stages_e else [stages_a, stages_e]
elif _controllers[controller_type][0] == FieldOrientedController:
if type(environment.action_space) == Box:
stage_d = stages[0][0]
stage_q = stages[0][1]
if 'i_sq' in ref_states and _controllers[stage_q['controller_type']][1] == ContinuousController:
p_gain_d = mp['l_d'] / (1.5 * environment.physical_system.tau * a) / u_sd_lim * i_sd_lim
i_gain_d = p_gain_d / (1.5 * environment.physical_system.tau * a ** 2)
p_gain_q = mp['l_q'] / (1.5 * environment.physical_system.tau * a) / u_sq_lim * i_sq_lim
i_gain_q = p_gain_q / (1.5 * environment.physical_system.tau * a ** 2)
stage_d['p_gain'] = stage_d.get('p_gain', p_gain_d)
stage_d['i_gain'] = stage_d.get('i_gain', i_gain_d)
stage_q['p_gain'] = stage_q.get('p_gain', p_gain_q)
stage_q['i_gain'] = stage_q.get('i_gain', i_gain_q)
if _controllers[stage_d['controller_type']][2] == PIDController:
d_gain_d = p_gain_d * environment.physical_system.tau
stage_d['d_gain'] = stage_d.get('d_gain', d_gain_d)
if _controllers[stage_q['controller_type']][2] == PIDController:
d_gain_q = p_gain_q * environment.physical_system.tau
stage_q['d_gain'] = stage_q.get('d_gain', d_gain_q)
stages = [[stage_d, stage_q]]
elif _controllers[controller_type][0] == CascadedFieldOrientedController:
if type(environment.action_space) is Box:
stage_d = stages[0][0]
stage_q = stages[0][1]
if 'torque' not in controller_kwargs['ref_states']:
overlaid = stages[1]
p_gain_d = mp['l_d'] / (1.5 * environment.physical_system.tau * a) / u_sd_lim * i_sd_lim
i_gain_d = p_gain_d / (1.5 * environment.physical_system.tau * a ** 2)
p_gain_q = mp['l_q'] / (1.5 * environment.physical_system.tau * a) / u_sq_lim * i_sq_lim
i_gain_q = p_gain_q / (1.5 * environment.physical_system.tau * a ** 2)
stage_d['p_gain'] = stage_d.get('p_gain', p_gain_d)
stage_d['i_gain'] = stage_d.get('i_gain', i_gain_d)
stage_q['p_gain'] = stage_q.get('p_gain', p_gain_q)
stage_q['i_gain'] = stage_q.get('i_gain', i_gain_q)
if _controllers[stage_d['controller_type']][2] == PIDController:
d_gain_d = p_gain_d * environment.physical_system.tau
stage_d['d_gain'] = stage_d.get('d_gain', d_gain_d)
if _controllers[stage_q['controller_type']][2] == PIDController:
d_gain_q = p_gain_q * environment.physical_system.tau
stage_q['d_gain'] = stage_q.get('d_gain', d_gain_q)
if 'torque' not in controller_kwargs['ref_states'] and \
_controllers[overlaid[0]['controller_type']][1] == ContinuousController:
t_n = p_gain_d / i_gain_d
j_total = environment.physical_system.mechanical_load.j_total
p_gain = j_total / (a ** 2 * t_n) / torque_lim * omega_lim
i_gain = p_gain / (a * t_n)
overlaid[0]['p_gain'] = overlaid[0].get('p_gain', p_gain)
overlaid[0]['i_gain'] = overlaid[0].get('i_gain', i_gain)
if _controllers[overlaid[0]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
overlaid[0]['d_gain'] = overlaid[0].get('d_gain', d_gain)
stages = [[stage_d, stage_q], overlaid]
else:
stages = [[stage_d, stage_q]]
else:
if 'omega' in ref_states and _controllers[stages[3][0]['controller_type']][1] == ContinuousController:
p_gain = environment.physical_system.mechanical_load.j_total / (
1.5 * a ** 2 * mp['p'] * np.abs(mp['l_d'] - mp['l_q'])) / i_sq_lim * omega_lim
i_gain = p_gain / (1.5 * environment.physical_system.tau * a)
stages[3][0]['p_gain'] = stages[3][0].get('p_gain', p_gain)
stages[3][0]['i_gain'] = stages[3][0].get('i_gain', i_gain)
if _controllers[stages[3][0]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages[3][0]['d_gain'] = stages[3][0].get('d_gain', d_gain)
return stages
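# Note on the gain design above (illustrative, not part of the original code): the
# current-controller gains follow a magnitude-optimum-like layout,
#   p_gain = L / (tau * a) / u_lim * i_lim,   i_gain = p_gain / (tau * a**2).
# With assumed example values L = 1e-3 H, tau = 1e-4 s, a = 4, u_lim = 420 V and
# i_lim = 75 A this gives p_gain = 1e-3 / (1e-4 * 4) / 420 * 75 ~= 0.45 and
# i_gain = 0.45 / (1e-4 * 16) ~= 279. The numbers are made up for illustration only.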
class ContinuousActionController(Controller):
"""
This class performs current control for all continuous DC motor systems. By default, a PI controller is used
for current control. An EMF compensation is applied. For the externally excited dc motor, the excitation current
is also controlled.
"""
def __init__(self, environment, stages, ref_states, external_ref_plots=[], **controller_kwargs):
assert type(environment.action_space) is Box and isinstance(environment.physical_system,
DcMotorSystem), 'No suitable action space for Continuous Action Controller'
self.action_space = environment.action_space
self.state_names = environment.state_names
self.ref_idx = np.where(ref_states != 'i_e')[0][0]
self.ref_state_idx = environment.state_names.index(ref_states[self.ref_idx])
self.i_idx = environment.physical_system.CURRENTS_IDX[-1]
self.u_idx = environment.physical_system.VOLTAGES_IDX[-1]
self.limit = environment.physical_system.limits[environment.state_filter]
self.nominal_values = environment.physical_system.nominal_state[environment.state_filter]
self.omega_idx = self.state_names.index('omega')
self.action = np.zeros(self.action_space.shape[0])
self.control_e = isinstance(environment.physical_system.electrical_motor, DcExternallyExcitedMotor)
mp = environment.physical_system.electrical_motor.motor_parameter
self.psi_e = mp.get('psi_e', None)
self.l_e = mp.get('l_e_prime', None)
self.external_ref_plots = external_ref_plots
self.action_limit_low = self.action_space.low[0] * self.nominal_values[self.u_idx] / self.limit[self.u_idx]
self.action_limit_high = self.action_space.high[0] * self.nominal_values[self.u_idx] / self.limit[self.u_idx]
for ext_ref_plot in self.external_ref_plots:
ext_ref_plot.set_reference(ref_states)
if self.control_e:
assert len(stages) == 2, 'Controller design is incomplete'
assert 'i_e' in ref_states, 'No reference for i_e'
self.ref_e_idx = np.where(ref_states == 'i_e')[0][0]
self.controller_e = _controllers[stages[1][0]['controller_type']][1].make(environment, stages[1][0],
**controller_kwargs)
self.controller = _controllers[stages[0][0]['controller_type']][1].make(environment, stages[0][0],
**controller_kwargs)
u_e_idx = self.state_names.index('u_e')
self.action_e_limit_low = self.action_space.low[1] * self.nominal_values[u_e_idx] / self.limit[u_e_idx]
self.action_e_limit_high = self.action_space.high[1] * self.nominal_values[u_e_idx] / self.limit[u_e_idx]
else:
if 'i_e' not in ref_states:
assert len(ref_states) <= 1, 'Too many referenced states'
self.controller = _controllers[stages[0]['controller_type']][1].make(environment, stages[0],
**controller_kwargs)
def control(self, state, reference):
self.action[0] = self.controller.control(state[self.ref_state_idx], reference[self.ref_idx]) + self.feedforward(
state)
if self.action_limit_low <= self.action[0] <= self.action_limit_high:
self.controller.integrate(state[self.ref_state_idx], reference[self.ref_idx])
else:
self.action[0] = np.clip(self.action[0], self.action_limit_low, self.action_limit_high)
if self.control_e:
self.action[1] = self.controller_e.control(state[self.i_idx], reference[self.ref_e_idx])
if self.action_e_limit_low <= self.action[1] <= self.action_e_limit_high:
self.controller_e.integrate(state[self.i_idx], reference[self.ref_e_idx])
else:
self.action[1] = np.clip(self.action[1], self.action_e_limit_low, self.action_e_limit_high)
self.plot(self.external_ref_plots, self.state_names)
return self.action
def get_ref(self):
return dict(ref_state=[], ref_value=[])
def reset(self):
self.controller.reset()
if self.control_e:
self.controller_e.reset()
def feedforward(self, state):
psi_e = self.psi_e or self.l_e * state[self.i_idx] * self.nominal_values[self.i_idx]
return (state[self.omega_idx] * self.nominal_values[self.omega_idx] * psi_e) / self.nominal_values[self.u_idx]
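# Reading of the feedforward term above (interpretation only): the controller adds the
# normalized back-EMF e = omega * psi_e to the voltage action, where psi_e is either the
# constant flux mp['psi_e'] or l_e_prime * i_e for the externally excited motor. Since
# state and action are per-unit values, the product is denormalized with the nominal
# speed and renormalized with the nominal voltage.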
class DiscreteActionController(Controller):
"""
This class is used for current control of all DC motor systems with discrete actions. By default, a three-point
controller is used. For the externally excited dc motor, the excitation current is also controlled.
"""
def __init__(self, environment, stages, ref_states, external_ref_plots=[], **controller_kwargs):
assert type(environment.action_space) in [Discrete, MultiDiscrete] and isinstance(environment.physical_system,
DcMotorSystem), 'No suitable action space for Discrete Action Controller'
self.ref_idx = np.where(ref_states != 'i_e')[0][0]
self.ref_state_idx = environment.state_names.index(ref_states[self.ref_idx])
self.i_idx = environment.physical_system.CURRENTS_IDX[-1]
self.control_e = isinstance(environment.physical_system.electrical_motor, DcExternallyExcitedMotor)
self.state_names = environment.state_names
self.external_ref_plots = external_ref_plots
for ext_ref_plot in self.external_ref_plots:
ext_ref_plot.set_reference(ref_states)
if self.control_e:
assert len(stages) == 2, 'Controller design is incomplete'
assert 'i_e' in ref_states, 'No reference for i_e'
self.ref_e_idx = np.where(ref_states == 'i_e')[0][0]
self.controller_e = _controllers[stages[1][0]['controller_type']][1].make(environment, stages[1][0],
control_e=True,
**controller_kwargs)
self.controller = _controllers[stages[0][0]['controller_type']][1].make(environment, stages[0][0],
**controller_kwargs)
else:
assert len(ref_states) <= 1, 'Too many referenced states'
self.controller = _controllers[stages[0]['controller_type']][1].make(environment, stages[0],
**controller_kwargs)
def control(self, state, reference):
self.plot(self.external_ref_plots, self.state_names)
if self.control_e:
return [self.controller.control(state[self.ref_state_idx], reference[self.ref_idx]),
self.controller_e.control(state[self.i_idx], reference[self.ref_e_idx])]
else:
return self.controller.control(state[self.ref_state_idx], reference[self.ref_idx])
def get_ref(self):
return dict(ref_state=[], ref_value=[])
def reset(self):
self.controller.reset()
if self.control_e:
self.controller_e.reset()
class CascadedController(Controller):
"""
This class is used for cascaded torque and speed control of all dc motor environments. Each stage can contain
continuous or discrete controllers. For the externally excited dc motor an additional controller is used for
the excitation current. The calculated reference values of the intermediate stages can be inserted into the
plots.
"""
def __init__(self, environment, stages, ref_states, external_ref_plots=[], **controller_kwargs):
self.action_space = environment.action_space
self.state_space = environment.physical_system.state_space
self.state_names = environment.state_names
self.i_e_idx = environment.physical_system.CURRENTS_IDX[-1]
self.i_a_idx = environment.physical_system.CURRENTS_IDX[0]
self.u_idx = environment.physical_system.VOLTAGES_IDX[-1]
self.omega_idx = environment.state_names.index('omega')
self.torque_idx = environment.state_names.index('torque')
self.ref_idx = np.where(ref_states != 'i_e')[0][0]
self.ref_state_idx = [self.i_a_idx, environment.state_names.index(ref_states[self.ref_idx])]
self.limit = environment.physical_system.limits[environment.state_filter]
self.nominal_values = environment.physical_system.nominal_state[environment.state_filter]
self.control_e = isinstance(environment.physical_system.electrical_motor, DcExternallyExcitedMotor)
self.control_omega = 0
mp = environment.physical_system.electrical_motor.motor_parameter
self.psi_e = mp.get('psi_e', False)
self.l_e = mp.get('l_e_prime', False)
self.r_e = mp.get('r_e', None)
self.r_a = mp.get('r_a', None)
if type(self.action_space) is Box:
self.action_limit_low = self.action_space.low[0] * self.nominal_values[self.u_idx] / self.limit[self.u_idx]
self.action_limit_high = self.action_space.high[0] * self.nominal_values[self.u_idx] / self.limit[self.u_idx]
self.state_limit_low = self.state_space.low * self.nominal_values / self.limit
self.state_limit_high = self.state_space.high * self.nominal_values / self.limit
if self.control_e:
assert len(stages) == 2, 'Controller design is incomplete'
self.ref_e_idx = False if 'i_e' not in ref_states else np.where(ref_states=='i_e')[0][0]
self.control_e_idx = 1
if self.omega_idx in self.ref_state_idx:
self.ref_state_idx.insert(1, self.torque_idx)
self.control_omega = 1
self.ref_state_idx.append(self.i_e_idx)
self.controller_e = _controllers[stages[1][0]['controller_type']][1].make(environment, stages[1][0],
control_e=True,
**controller_kwargs)
stages = stages[0]
u_e_idx = self.state_names.index('u_e')
if type(self.action_space) is Box:
self.action_e_limit_low = self.action_space.low[1] * self.nominal_values[u_e_idx] / self.limit[u_e_idx]
self.action_e_limit_high = self.action_space.high[1] * self.nominal_values[u_e_idx] / self.limit[u_e_idx]
else:
self.control_e_idx = 0
assert len(ref_states) <= 1, 'Too many referenced states'
self.stage_type = [_controllers[stage['controller_type']][1] == ContinuousController for stage in stages]
self.controller_stages = [
_controllers[stage['controller_type']][1].make(environment, stage, cascaded=stages.index(stage) != 0) for
stage in stages]
self.external_ref_plots = external_ref_plots
internal_refs = np.array([environment.state_names[i] for i in self.ref_state_idx])
ref_states_plotted = np.unique(np.append(ref_states, internal_refs))
for external_plots in self.external_ref_plots:
external_plots.set_reference(ref_states_plotted)
assert type(self.action_space) is Box or not self.stage_type[0], 'No suitable inner controller'
assert type(self.action_space) in [Discrete, MultiDiscrete] or self.stage_type[
0], 'No suitable inner controller'
self.ref = np.zeros(len(self.controller_stages) + self.control_e_idx + self.control_omega)
def control(self, state, reference):
self.ref[-1-self.control_e_idx] = reference[self.ref_idx]
for i in range(len(self.controller_stages) - 1, 0 + self.control_e_idx - self.control_omega, -1):
ref_idx = i - 1 + self.control_omega
state_idx = self.ref_state_idx[ref_idx]
self.ref[ref_idx] = self.controller_stages[i].control(
state[state_idx], self.ref[ref_idx + 1])
if (self.state_limit_low[state_idx] <= self.ref[ref_idx] <= self.state_limit_high[state_idx]) and self.stage_type[i]:
self.controller_stages[i].integrate(state[self.ref_state_idx[i + self.control_omega]], reference[0])
elif self.stage_type[i]:
self.ref[ref_idx] = np.clip(self.ref[ref_idx], self.state_limit_low[state_idx],
self.state_limit_high[state_idx])
if self.control_e:
i_e = np.clip(
np.power(self.r_a * (self.ref[1] * self.limit[self.torque_idx]) ** 2 / (self.r_e * self.l_e ** 2),
1 / 4), self.action_space.low[1] * self.limit[self.i_e_idx],
self.action_space.high[1] * self.limit[self.i_e_idx])
i_a = np.clip(self.ref[1] * self.limit[self.torque_idx] / (self.l_e * i_e),
self.action_space.low[0] * self.limit[self.i_a_idx],
self.action_space.high[0] * self.limit[self.i_a_idx])
self.ref[-1] = i_e / self.limit[self.i_e_idx]
self.ref[0] = i_a / self.limit[self.i_a_idx]
action = self.controller_stages[0].control(state[self.ref_state_idx[0]], self.ref[0])
if self.stage_type[0]:
action += self.feedforward(state)
if self.action_limit_low <= action <= self.action_limit_high:
self.controller_stages[0].integrate(state[self.ref_state_idx[0]], self.ref[0])
action = [action]
else:
action = np.clip([action], self.action_limit_low, self.action_limit_high)
if self.control_e:
if self.ref_e_idx:
self.ref[-1] = reference[self.ref_e_idx]
action_u_e = self.controller_e.control(state[self.i_e_idx], self.ref[-1])
if self.stage_type[0]:
action = np.append(action, action_u_e)
if self.action_e_limit_low <= action[1] <= self.action_e_limit_high:
self.controller_e.integrate(state[self.i_e_idx], self.ref[-1])
action = np.clip(action, self.action_e_limit_low, self.action_e_limit_high)
else:
action = np.array([action, action_u_e], dtype='object')
self.plot(self.external_ref_plots, self.state_names)
return action
def feedforward(self, state):
psi_e = max(self.psi_e or self.l_e * state[self.i_e_idx] * self.nominal_values[self.i_e_idx], 1e-6)
return (state[self.omega_idx] * self.nominal_values[self.omega_idx] * psi_e) / self.nominal_values[self.u_idx]
def get_ref(self):
return dict(ref_state=self.ref_state_idx, ref_value=self.ref)
def reset(self):
for controller in self.controller_stages:
controller.reset()
if self.control_e:
self.controller_e.reset()
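# Background for the i_e / i_a split in control() above (interpretation, not new code):
# for the externally excited DC motor T = l_e_prime * i_a * i_e, and the copper losses
# P = r_a * i_a**2 + r_e * i_e**2 are minimal for
#   i_e = (r_a * T**2 / (r_e * l_e_prime**2)) ** 0.25,   i_a = T / (l_e_prime * i_e),
# which is exactly the fourth-root expression used before both currents are clipped to
# the action-space limits.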
class FieldOrientedController(Controller):
"""
This class controls the currents of synchronous motors. In the case of continuous manipulated variables, the
control is performed in the rotating dq-coordinates. For this purpose, the two current components are optionally
decoupled and two independent current controllers are used.
In the case of discrete manipulated variables, control takes place in stator-fixed coordinates. The reference
values are converted into these coordinates so that an on-off controller calculates the corresponding
manipulated variable for each current component.
"""
def __init__(self, environment, stages, ref_states, external_ref_plots=[], **controller_kwargs):
assert isinstance(environment.physical_system, SynchronousMotorSystem), 'No suitable Environment for FOC Controller'
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
self.backward_transformation = (lambda quantities, eps: t32(q(quantities[::-1], eps)))
self.tau = environment.physical_system.tau
self.ref_d_idx = np.where(ref_states == 'i_sd')[0][0]
self.ref_q_idx = np.where(ref_states == 'i_sq')[0][0]
self.d_idx = environment.state_names.index(ref_states[self.ref_d_idx])
self.q_idx = environment.state_names.index(ref_states[self.ref_q_idx])
self.action_space = environment.action_space
self.state_space = environment.physical_system.state_space
self.state_names = environment.state_names
self.i_sd_idx = environment.state_names.index('i_sd')
self.i_sq_idx = environment.state_names.index('i_sq')
self.u_sd_idx = environment.state_names.index('u_sd')
self.u_sq_idx = environment.state_names.index('u_sq')
self.u_a_idx = environment.state_names.index('u_a')
self.u_b_idx = environment.state_names.index('u_b')
self.u_c_idx = environment.state_names.index('u_c')
self.omega_idx = environment.state_names.index('omega')
self.eps_idx = environment.state_names.index('epsilon')
self.limit = environment.physical_system.limits
self.mp = environment.physical_system.electrical_motor.motor_parameter
self.psi_p = self.mp.get('psi_p', 0)
self.dead_time = 1.5 if environment.physical_system.converter._dead_time else 0.5
self.has_cont_action_space = type(self.action_space) is Box
self.external_ref_plots = external_ref_plots
for ext_ref_plot in self.external_ref_plots:
ext_ref_plot.set_reference(ref_states)
if self.has_cont_action_space:
assert len(stages[0]) == 2, 'Number of stages not correct'
self.decoupling = controller_kwargs.get('decoupling', True)
[self.u_sq_0, self.u_sd_0] = [0, 0]
self.d_controller = _controllers[stages[0][0]['controller_type']][1].make(
environment, stages[0][0], **controller_kwargs)
self.q_controller = _controllers[stages[0][1]['controller_type']][1].make(
environment, stages[0][1], **controller_kwargs)
else:
assert len(stages) == 3, 'Number of stages not correct'
self.abc_controller = [_controllers[stages[0][0]['controller_type']][1].make(
environment, stages[i][0], **controller_kwargs) for i in range(3)]
self.i_abc_idx = [environment.state_names.index(state) for state in ['i_a', 'i_b', 'i_c']]
def control(self, state, reference):
epsilon_d = state[self.eps_idx] * self.limit[self.eps_idx] + self.dead_time * self.tau * \
state[self.omega_idx] * self.limit[self.omega_idx] * self.mp['p']
if self.has_cont_action_space:
if self.decoupling:
self.u_sd_0 = -state[self.omega_idx] * self.mp['p'] * self.mp['l_q'] * state[self.i_sq_idx] * self.limit[
self.i_sq_idx] / self.limit[self.u_sd_idx] * self.limit[self.omega_idx]
self.u_sq_0 = state[self.omega_idx] * self.mp['p'] * (
state[self.i_sd_idx] * self.mp['l_d'] * self.limit[self.i_sd_idx] + self.psi_p) / self.limit[
self.u_sq_idx] * self.limit[self.omega_idx]
u_sd = self.d_controller.control(state[self.d_idx], reference[self.ref_d_idx]) + self.u_sd_0
u_sq = self.q_controller.control(state[self.q_idx], reference[self.ref_q_idx]) + self.u_sq_0
action_temp = self.backward_transformation((u_sq, u_sd), epsilon_d)
action_temp = action_temp - 0.5 * (max(action_temp) + min(action_temp))
action = np.clip(action_temp, self.action_space.low[0], self.action_space.high[0])
if np.all(action == action_temp):
self.d_controller.integrate(state[self.d_idx], reference[self.ref_d_idx])
self.q_controller.integrate(state[self.q_idx], reference[self.ref_q_idx])
else:
ref_abc = self.backward_transformation((reference[self.ref_q_idx], reference[self.ref_d_idx]), epsilon_d)
action = 0
for i in range(3):
action += (2 ** (2 - i)) * self.abc_controller[i].control(state[self.i_abc_idx[i]], ref_abc[i])
self.plot(self.external_ref_plots, self.state_names)
return action
def get_ref(self):
return dict(ref_state=[], ref_value=[])
def reset(self):
if self.has_cont_action_space:
self.d_controller.reset()
self.q_controller.reset()
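# The decoupling terms used above correspond to the steady-state cross-coupling of the
# dq model (interpretation only):
#   u_sd,0 = -omega_el * l_q * i_sq,    u_sq,0 = omega_el * (l_d * i_sd + psi_p),
# with omega_el = p * omega. The additional limit factors only translate between the
# per-unit signals of the environment and physical quantities.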
class CascadedFieldOrientedController(Controller):
"""
This controller is used for torque or speed control of synchronous motors. The controller consists of a field
oriented controller for current control, an efficiency-optimized torque controller and an optional speed
controller. The current control is equivalent to the current control of the FieldOrientedController. The torque
controller is based on the maximum torque per current (MTPC) control strategy in the voltage control range and
the maximum torque per flux (MTPF) control strategy with an additional modulation controller in the flux
weakening range. The speed controller is designed as a PI-controller by default.
"""
def __init__(self, environment, stages, ref_states, external_ref_plots=[], plot_torque=True, plot_modulation=False,
update_interval=1000, torque_control='interpolate', **controller_kwargs):
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
self.backward_transformation = (lambda quantities, eps: t32(q(quantities[::-1], eps)))
self.tau = environment.physical_system.tau
self.action_space = environment.action_space
self.state_space = environment.physical_system.state_space
self.state_names = environment.state_names
self.i_sd_idx = environment.state_names.index('i_sd')
self.i_sq_idx = environment.state_names.index('i_sq')
self.u_sd_idx = environment.state_names.index('u_sd')
self.u_sq_idx = environment.state_names.index('u_sq')
self.u_a_idx = environment.state_names.index('u_a')
self.u_b_idx = environment.state_names.index('u_b')
self.u_c_idx = environment.state_names.index('u_c')
self.omega_idx = environment.state_names.index('omega')
self.eps_idx = environment.state_names.index('epsilon')
self.torque_idx = environment.state_names.index('torque')
self.external_ref_plots = external_ref_plots
self.torque_control = 'torque' in ref_states or 'omega' in ref_states
self.current_control = 'i_sd' in ref_states
self.omega_control = 'omega' in ref_states
if self.current_control:
self.ref_d_idx = np.where(ref_states == 'i_sd')[0][0]
self.ref_idx = np.where(ref_states != 'i_sd')[0][0]
self.ref_state_idx = [self.i_sq_idx, environment.state_names.index(ref_states[self.ref_idx])]
self.omega_control = 'omega' in ref_states
self.has_cont_action_space = type(self.action_space) is Box
self.limit = environment.physical_system.limits
self.nominal_values = environment.physical_system.nominal_state
self.mp = environment.physical_system.electrical_motor.motor_parameter
self.psi_p = self.mp.get('psi_p', 0)
self.dead_time = 1.5 if environment.physical_system.converter._dead_time else 0.5
self.decoupling = controller_kwargs.get('decoupling', True)
self.ref_state_idx = [self.i_sq_idx, self.i_sd_idx]
if self.torque_control:
self.ref_state_idx.append(self.torque_idx)
self.torque_controller = TorqueToCurrentConversion(environment, plot_torque, plot_modulation,
update_interval, torque_control)
if self.omega_control:
self.ref_state_idx.append(self.omega_idx)
self.ref_idx = 0
if self.has_cont_action_space:
assert len(stages[0]) == 2, 'Number of stages not correct'
self.d_controller = _controllers[stages[0][0]['controller_type']][1].make(
environment, stages[0][0], **controller_kwargs)
self.q_controller = _controllers[stages[0][1]['controller_type']][1].make(
environment, stages[0][1], **controller_kwargs)
[self.u_sq_0, self.u_sd_0] = [0, 0]
if self.omega_control:
self.overlaid_controller = [_controllers[stages[1][i]['controller_type']][1].make(
environment, stages[1][i], cascaded=True, **controller_kwargs) for i in range(0, len(stages[1]))]
self.overlaid_type = [_controllers[stages[1][i]['controller_type']][1] == ContinuousController for i in
range(0, len(stages[1]))]
else:
if self.omega_control:
assert len(stages) == 4, 'Number of stages not correct'
self.overlaid_controller = [_controllers[stages[3][i]['controller_type']][1].make(
environment, stages[3][i], cascaded=True, **controller_kwargs) for i in range(len(stages[3]))]
self.overlaid_type = [_controllers[stages[3][i]['controller_type']][1] == ContinuousController for i in
range(len(stages[3]))]
else:
assert len(stages) == 3, 'Number of stages not correct'
self.abc_controller = [_controllers[stages[0][0]['controller_type']][1].make(
environment, stages[i][0], **controller_kwargs) for i in range(3)]
self.i_abc_idx = [environment.state_names.index(state) for state in ['i_a', 'i_b', 'i_c']]
self.ref = np.zeros(len(self.ref_state_idx))
self.p = [environment.state_names[i] for i in self.ref_state_idx]
plot_ref = np.append(np.array([environment.state_names[i] for i in self.ref_state_idx]), ref_states)
for ext_ref_plot in self.external_ref_plots:
ext_ref_plot.set_reference(plot_ref)
def control(self, state, reference):
self.ref[-1] = reference[self.ref_idx]
epsilon_d = state[self.eps_idx] * self.limit[self.eps_idx] + self.dead_time * self.tau * state[self.omega_idx] * \
self.limit[self.omega_idx] * self.mp['p']
if self.omega_control:
for i in range(len(self.overlaid_controller) + 1, 1, -1):
self.ref[i] = self.overlaid_controller[i-2].control(state[self.ref_state_idx[i + 1]], self.ref[i + 1])
if (0.85 * self.state_space.low[self.ref_state_idx[i]] <= self.ref[i] <= 0.85 *
self.state_space.high[self.ref_state_idx[i]]) and self.overlaid_type[i - 2]:
self.overlaid_controller[i - 2].integrate(state[self.ref_state_idx[i + 1]], self.ref[i + 1])
else:
self.ref[i] = np.clip(self.ref[i], self.nominal_values[self.ref_state_idx[i]] / self.limit[
self.ref_state_idx[i]] * self.state_space.low[self.ref_state_idx[i]],
self.nominal_values[self.ref_state_idx[i]] / self.limit[
self.ref_state_idx[i]] * self.state_space.high[self.ref_state_idx[i]])
if self.torque_control:
torque = self.ref[2] * self.limit[self.torque_idx]
self.ref[0], self.ref[1] = self.torque_controller.control(state, torque)
if self.has_cont_action_space:
if self.decoupling:
self.u_sd_0 = -state[self.omega_idx] * self.mp['p'] * self.mp['l_q'] * state[self.i_sq_idx]\
* self.limit[self.i_sq_idx] / self.limit[self.u_sd_idx] * self.limit[self.omega_idx]
self.u_sq_0 = state[self.omega_idx] * self.mp['p'] * (
state[self.i_sd_idx] * self.mp['l_d'] * self.limit[self.i_sd_idx] + self.psi_p) / self.limit[
self.u_sq_idx] * self.limit[self.omega_idx]
if self.torque_control:
u_sd = self.d_controller.control(state[self.i_sd_idx], self.ref[1]) + self.u_sd_0
else:
u_sd = self.d_controller.control(state[self.i_sd_idx], reference[self.ref_d_idx]) + self.u_sd_0
u_sq = self.q_controller.control(state[self.i_sq_idx], self.ref[0]) + self.u_sq_0
action_temp = self.backward_transformation((u_sq, u_sd), epsilon_d)
action_temp = action_temp - 0.5 * (max(action_temp) + min(action_temp))
action = np.clip(action_temp, self.action_space.low[0], self.action_space.high[0])
if np.all(action == action_temp):
if self.torque_control:
self.d_controller.integrate(state[self.i_sd_idx], self.ref[1])
else:
self.d_controller.integrate(state[self.i_sd_idx], reference[self.ref_d_idx])
self.q_controller.integrate(state[self.i_sq_idx], self.ref[0])
else:
r = self.ref[1] if self.torque_control else reference[self.ref_d_idx]
ref_abc = self.backward_transformation((self.ref[0], r), epsilon_d)
action = 0
for i in range(3):
action += (2 ** (2 - i)) * self.abc_controller[i].control(state[self.i_abc_idx[i]], ref_abc[i])
self.plot(self.external_ref_plots, self.state_names)
return action
def get_ref(self):
return dict(ref_state=self.ref_state_idx[:-1], ref_value=self.ref[:-1])
def reset(self):
if self.omega_control:
for overlaid_controller in self.overlaid_controller:
overlaid_controller.reset()
if self.has_cont_action_space:
self.d_controller.reset()
self.q_controller.reset()
else:
for abc_controller in self.abc_controller:
abc_controller.reset()
if self.torque_control:
self.torque_controller.reset()
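# For reference (not part of the original file): the torque controller below builds on
# the PMSM torque equation
#   T = 1.5 * p * (psi_p + (l_d - l_q) * i_sd) * i_sq,
# selecting the current pair with minimal magnitude for a requested torque (MTPC) and,
# at high speed, the pair with maximal torque for an admissible flux (MTPF).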
class TorqueToCurrentConversion:
"""
This class represents the torque controller for cascaded control of synchronous motors. For low speeds only the
current limitation of the motor is important. The current vector to set a desired torque is selected so that the
magnitude of the current vector is minimal (Maximum Torque per Current). For higher speeds, the voltage limitation
of the synchronous motor or the actuator must also be taken into account. This is done by converting the
available voltage to a speed-dependent maximum flux. An additional modulation controller is used for the flux
control. By limiting the flux and the maximum torque per flux (MTPF), an operating point for the flux and the
torque is obtained. This is then converted into a current operating point. The conversion can be done by
different methods (parameter torque_control): maps can be determined in advance, either by interpolation or
analytically, or the analytical determination can be carried out online.
For the visualization of the operating points, both for the current operating points as well as the flux and
torque operating points, predefined plots are available (plot_torque: default True). Also the values of the
modulation controller can be visualized (plot_modulation: default False).
"""
def __init__(self, environment, plot_torque=True, plot_modulation=False, update_interval=1000,
torque_control='interpolate'):
self.mp = environment.physical_system.electrical_motor.motor_parameter
self.limit = environment.physical_system.limits
self.nominal_values = environment.physical_system.nominal_state
self.torque_control = torque_control
self.l_d = self.mp['l_d']
self.l_q = self.mp['l_q']
self.p = self.mp['p']
self.psi_p = self.mp.get('psi_p', 0)
self.invert = -1 if (self.psi_p == 0 and self.l_q < self.l_d) else 1
self.tau = environment.physical_system.tau
self.omega_idx = environment.state_names.index('omega')
self.i_sd_idx = environment.state_names.index('i_sd')
self.i_sq_idx = environment.state_names.index('i_sq')
self.u_sd_idx = environment.state_names.index('u_sd')
self.u_sq_idx = environment.state_names.index('u_sq')
self.torque_idx = environment.state_names.index('torque')
self.epsilon_idx = environment.state_names.index('epsilon')
self.a_max = 2 / np.sqrt(3) # maximum modulation level
self.k_ = 0.95
d = 1.2 # damping of the modulation controller
alpha = d / (d - np.sqrt(d ** 2 - 1))
self.i_gain = 1 / (self.mp['l_q'] / (1.25 * self.mp['r_s'])) * (alpha - 1) / alpha ** 2
self.u_a_idx = environment.state_names.index('u_a')
self.u_dc = np.sqrt(3) * self.limit[self.u_a_idx]
self.limited = False
self.integrated = 0
self.psi_high = 0.2 * np.sqrt((self.psi_p + self.l_d * self.nominal_values[self.i_sd_idx]) ** 2 + (
self.l_q * self.nominal_values[self.i_sq_idx]) ** 2)
self.psi_low = -self.psi_high
self.integrated_reset = 0.01 * self.psi_low # Reset value of the modulation controller
self.t_count = 250
self.psi_count = 250
self.i_count = 500
self.torque_list = []
self.psi_list = []
self.k_list = []
self.i_d_list = []
self.i_q_list = []
def mtpc():
def i_q_(i_d, torque):
return torque / (i_d * (self.l_d - self.l_q) + self.psi_p) / (1.5 * self.p)
def i_d_(i_q, torque):
return -np.abs(torque / (1.5 * self.p * (self.l_d - self.l_q) * i_q))
# calculate the maximum torque
self.max_torque = max(
1.5 * self.p * (self.psi_p + (self.l_d - self.l_q) * (-self.limit[self.i_sd_idx])) * self.limit[
self.i_sq_idx], self.limit[self.torque_idx])
torque = np.linspace(-self.max_torque, self.max_torque, self.t_count)
characteristic = []
for t in torque:
if self.psi_p != 0:
if self.l_d == self.l_q:
i_d = 0
else:
i_d = np.linspace(-2.5*self.limit[self.i_sd_idx], 0, self.i_count)
i_q = i_q_(i_d, t)
else:
i_q = np.linspace(-2.5*self.limit[self.i_sq_idx], 2.5*self.limit[self.i_sq_idx], self.i_count)
if self.l_d == self.l_q:
i_d = 0
else:
i_d = i_d_(i_q, t)
# Different current vectors are determined for each torque and the smallest magnitude is selected
i = np.power(i_d, 2) + np.power(i_q, 2)
min_idx = np.where(i == np.amin(i))[0][0]
if self.l_d == self.l_q:
i_q_ret = i_q
i_d_ret = i_d
else:
i_q_ret = np.sign((self.l_q - self.l_d) * t) * np.abs(i_q[min_idx])
i_d_ret = i_d[min_idx]
# The flux is finally calculated from the currents
psi = np.sqrt((self.psi_p + self.l_d * i_d_ret) ** 2 + (self.l_q * i_q_ret) ** 2)
characteristic.append([t, i_d_ret, i_q_ret, psi])
return np.array(characteristic)
def mtpf():
# maximum flux is calculated
self.psi_max_mtpf = np.sqrt((self.psi_p + self.l_d * self.nominal_values[self.i_sd_idx]) ** 2 + (
self.l_q * self.nominal_values[self.i_sq_idx]) ** 2)
psi = np.linspace(0, self.psi_max_mtpf, self.psi_count)
i_d = np.linspace(-self.nominal_values[self.i_sd_idx], 0, self.i_count)
i_d_best = 0
i_q_best = 0
psi_i_d_q = []
# Iterates through all flux values to determine the maximum torque
for psi_ in psi:
if psi_ == 0:
i_d_ = -self.psi_p / self.l_d
i_q = 0
t = 0
psi_i_d_q.append([psi_, t, i_d_, i_q])
else:
if self.psi_p == 0:
i_q_best = psi_ / np.sqrt(self.l_d ** 2 + self.l_q ** 2)
i_d_best = -i_q_best
t = 1.5 * self.p * (self.psi_p + (self.l_d - self.l_q) * i_d_best) * i_q_best
else:
i_d_idx = np.where(psi_ ** 2 - np.power(self.psi_p + self.l_d * i_d, 2) >= 0)
i_d_ = i_d[i_d_idx]
# calculate all possible i_q currents for i_d currents
i_q = np.sqrt(psi_ ** 2 - np.power(self.psi_p + self.l_d * i_d_, 2)) / self.l_q
i_idx = np.where(np.sqrt(np.power(i_q / self.nominal_values[self.i_sq_idx], 2) + np.power(
i_d_ / self.nominal_values[self.i_sd_idx], 2)) <= 1)
i_d_ = i_d_[i_idx]
i_q = i_q[i_idx]
torque = 1.5 * self.p * (self.psi_p + (self.l_d - self.l_q) * i_d_) * i_q
# choose the maximum torque
if np.size(torque) > 0:
t = np.amax(torque)
i_idx = np.where(torque == t)[0][0]
i_d_best = i_d_[i_idx]
i_q_best = i_q[i_idx]
if np.sqrt(i_d_best**2 + i_q_best**2) <= self.nominal_values[self.i_sq_idx]:
psi_i_d_q.append([psi_, t, i_d_best, i_q_best])
psi_i_d_q = np.array(psi_i_d_q)
self.psi_max_mtpf = np.max(psi_i_d_q[:, 0])
psi_i_d_q_neg = np.rot90(np.array([psi_i_d_q[:, 0], -psi_i_d_q[:, 1], psi_i_d_q[:, 2], -psi_i_d_q[:, 3]]))
psi_i_d_q = np.append(psi_i_d_q_neg, psi_i_d_q, axis=0)
return np.array(psi_i_d_q)
self.mtpc = mtpc()
self.mtpf = mtpf()
self.psi_t = np.sqrt(
np.power(self.psi_p + self.l_d * self.mtpc[:, 1], 2) + np.power(self.l_q * self.mtpc[:, 2], 2))
self.psi_t = np.array([self.mtpc[:, 0], self.psi_t])
self.i_q_max = np.linspace(-self.nominal_values[self.i_sq_idx], self.nominal_values[self.i_sq_idx], self.i_count)
self.i_d_max = -np.sqrt(self.nominal_values[self.i_sq_idx] ** 2 - np.power(self.i_q_max, 2))
import math
import os
import re
from multiprocessing import Pool
from collections import defaultdict
import tables
import numpy as np
from astropy.io import fits
from astropy.table import Table
from beast.observationmodel.noisemodel.generic_noisemodel import get_noisemodelcat
from beast.physicsmodel.grid import SEDGrid
# from beast.external import eztables
from beast.fitting.fit import save_pdf1d
from beast.fitting.fit_metrics import percentile
from beast.tools import read_beast_data
def uniform_slices(num_points, num_slices):
q = num_points // num_slices
r = num_points % num_slices
slices = []
for i in range(num_slices):
if i < r:
start = i * (q + 1)
stop = start + q + 1
# After the remainder has been taken care of, do strides of q
else:
start = r * (q + 1) + (i - r) * q
stop = start + q
slices.append(slice(start, stop))
return slices
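# Example (illustrative only): uniform_slices(10, 3) distributes the remainder over the
# first slices and returns [slice(0, 4), slice(4, 7), slice(7, 10)].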
def split_grid(grid_fname, num_subgrids, overwrite=False):
"""
Splits a spectral or sed grid (they are the same class actually)
according to grid point index (so basically, arbitrarily).
Parameters
----------
grid_fname: string
file name of the existing grid to be split up
num_subgrids: integer
the number of parts the grid should be split into
overwrite: bool
any subgrids that already exist will be deleted if set to True.
If set to False, skip over any grids that are already there.
Returns
-------
list of string
the names of the newly created subgrid files
"""
g = SEDGrid(grid_fname, backend="disk")
fnames = []
num_seds = len(g.seds)
slices = uniform_slices(num_seds, num_subgrids)
for i, slc in enumerate(slices):
subgrid_fname = grid_fname.replace(".hd5", "sub{}.hd5".format(i))
fnames.append(subgrid_fname)
if os.path.isfile(subgrid_fname):
if overwrite:
os.remove(subgrid_fname)
else:
print("{} already exists. Skipping.".format(subgrid_fname))
continue
print("constructing subgrid " + str(i))
# Load a slice as a SEDGrid object
sub_g = SEDGrid(
g.lamb[:], seds=g.seds[slc], grid=Table(g.grid[slc]), backend="memory",
)
if g.filters is not None:
sub_g.header["filters"] = " ".join(g.filters)
# Save it to a new file
sub_g.write(subgrid_fname, append=False)
return fnames
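# Sketch of typical usage (file names are hypothetical):
# sub_fnames = split_grid("beast_seds.hd5", num_subgrids=4)
# -> ["beast_sedssub0.hd5", ..., "beast_sedssub3.hd5"], each holding a contiguous
# chunk of the SEDs and grid rows of the full grid.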
def merge_grids(seds_fname, sub_names):
"""
Merges a set of grids into one big grid. The grids need to have the
same columns
Parameters
----------
seds_fname: string
path for the output file
sub_names: list of strings
paths for the input grids
"""
if not os.path.isfile(seds_fname):
for n in sub_names:
print("Appending {} to {}".format(n, seds_fname))
g = SEDGrid(n)
g.write(seds_fname, append=True)
else:
print("{} already exists".format(seds_fname))
def subgrid_info(grid_fname, noise_fname=None):
"""
Generates a list of mins and maxes of all the quantities in the given grid
Parameters
----------
grid_fname: string
path to a beast grid file (hd5 format)
noise_fname: string
Path to the noise model file for the given grid (hd5 format)
(optional). If this is given, the mins/maxes for the full model
fluxes are added too, under the name 'log'+filter+'_wd_bias'
(needs to conform to the name used in fit.py).
Returns
-------
info_dict: dictionary
{name of quantity [string]: {'min': min, 'max': max, 'unique': unique values}}
"""
# Use the disk backend to minimize the memory usage
sedgrid = SEDGrid(grid_fname, backend="disk")
seds = sedgrid.seds
info_dict = {}
qnames = sedgrid.keys()
for q in qnames:
qvals = sedgrid[q]
qmin = np.amin(qvals)
qmax = np.amax(qvals)
qunique = np.unique(qvals)
info_dict[q] = {}
info_dict[q]["min"] = qmin
info_dict[q]["max"] = qmax
info_dict[q]["unique"] = qunique
if noise_fname is not None:
noisemodel = get_noisemodelcat(noise_fname)
# The following is also in fit.py, so we're kind of doing double
# work here, but it's necessary if we want to know the proper
# ranges for these values.
full_model_flux = seds[:] + noisemodel["bias"]
logtempseds = np.array(full_model_flux)
full_model_flux = (
np.sign(logtempseds)
* np.log1p(np.abs(logtempseds * math.log(10)))
/ math.log(10)
)
filters = sedgrid.filters
for i, f in enumerate(filters):
f_fluxes = full_model_flux[:, i]
# Be sure to cut out the -100's in the calculation of the minimum
qmin = np.amin(f_fluxes[f_fluxes > -99.99])
qmax = np.amax(f_fluxes)
qunique = np.unique(f_fluxes)
q = "symlog" + f + "_wd_bias"
info_dict[q] = {}
info_dict[q]["min"] = qmin
info_dict[q]["max"] = qmax
info_dict[q]["unique"] = qunique
print("Gathered grid info for {}".format(grid_fname))
return info_dict
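# Note on the 'symlog' quantities above (interpretation only): the transform
#   F = sign(f) * log1p(|f * ln(10)|) / ln(10)
# behaves like log10(|f|) for large |f| and stays linear around zero, so biased model
# fluxes that can become negative still get finite, well-ordered values.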
def unpack_and_subgrid_info(x):
"""
Utility to call this function in parallel, with multiple arguments
"""
return subgrid_info(*x)
def reduce_grid_info(grid_fnames, noise_fnames=None, nprocs=1, cap_unique=1000):
"""
Computes the total minimum and maximum of the necessary quantities
across all the subgrids. Can run in parallel.
Parameters
----------
grid_fnames: list of str
subgrid file paths
noise_fnames: list of str (optional)
noise file for each subgrid
nprocs: int
Number of processes to use
cap_unique: int
Stop keeping track of the number of unique values once it
reaches this cap. This reduces the memory usage. (Typically, for
the fluxes, there are as many unique values as there are grid
points. Since we need to store all these values to check if
they're unique, a whole column of the grid is basically being
stored. This cap fixes this, and everything should keep working
in the rest of the code as long as cap_unique is larger than
whatever number of bins is being used).
Returns
-------
info_dict: dictionary
{name of quantity: (min, max), ...}
"""
# Gather the mins and maxes for the subgrid
if noise_fnames is None:
arguments = [(g, None) for g in grid_fnames]
else:
arguments = list(zip(grid_fnames, noise_fnames))
# Use generators here for memory efficiency
parallel = nprocs > 1
if parallel:
p = Pool(nprocs)
info_dicts_generator = p.imap(unpack_and_subgrid_info, arguments)
else:
info_dicts_generator = (subgrid_info(*a) for a in arguments)
# Assume that all info dicts have the same keys
first_info_dict = next(info_dicts_generator)
qs = [q for q in first_info_dict]
union_min = {}
union_max = {}
union_unique = {}
# This last field can take up a lot of memory. A solution would be
# to allow a maximum number of values (50 is the default maximum
# number of bins anyway, and this value is needed to determine the
# number of bins).
for q in qs:
# Combine the values of the first subgrid
union_min[q] = first_info_dict[q]["min"]
union_max[q] = first_info_dict[q]["max"]
union_unique[q] = first_info_dict[q]["unique"]
# And all the other subgrids (the generator just continues)
for individual_dict in info_dicts_generator:
for q in qs:
union_min[q] = min(union_min[q], individual_dict[q]["min"])
union_max[q] = max(union_max[q], individual_dict[q]["max"])
if len(union_unique[q]) < cap_unique:
union_unique[q] = np.union1d(
union_unique[q], individual_dict[q]["unique"]
)
result_dict = {}
for q in qs:
result_dict[q] = {
"min": union_min[q],
"max": union_max[q],
"num_unique": len(union_unique[q]),
}
return result_dict
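# Sketch of typical usage (hypothetical file names and quantity name):
# ranges = reduce_grid_info(["seds_sub0.hd5", "seds_sub1.hd5"],
#                           noise_fnames=["noise_sub0.hd5", "noise_sub1.hd5"], nprocs=2)
# ranges["Av"]["min"], ranges["Av"]["max"], ranges["Av"]["num_unique"]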
def merge_pdf1d_stats(
subgrid_pdf1d_fnames, subgrid_stats_fnames, re_run=False, output_fname_base=None
):
"""
Merge a set of 1d pdfs that were generated by fits on different
grids. It is necessary (and checked) that all the 1d pdfs have the
same limits, bin values, and number of bins.
The stats files are also combined; some values for the total grid
can be calculated by simply comparing them across all the grids,
others are recalculated after obtaining the new 1dpdfs.
Parameters
----------
subgrid_pdf1d_fnames: list of string
file names of all the pdf1d fits files
subgrid_stats_fnames: list of string
file names of the stats files. Should be in the same order as
subgrid_pdf1d_fnames. These files are needed to help with
averaging the pdf1d files as they contain the total weight of
each subgrid.
re_run: boolean (default=False)
If True, re-run the merging, even if the merged files already
exist. If False, will only merge files if they don't exist.
output_fname_base: string (default=None)
If set, this will prepend the output 1D PDF and stats file names
Returns
-------
merged_pdf1d_fname, merged_stats_fname: string, string
file name of the resulting pdf1d and stats fits files (newly
created by this function)
"""
# -------------
# before running, check if the files already exist
# (unless the user wants to re-create them regardless)
# 1D PDF
if output_fname_base is not None:
pdf1d_fname = output_fname_base + "_pdf1d.fits"
else:
pdf1d_fname = "combined_pdf1d.fits"
# stats
if output_fname_base is None:
stats_fname = "combined_stats.fits"
else:
stats_fname = output_fname_base + "_stats.fits"
if (
os.path.isfile(pdf1d_fname)
and os.path.isfile(stats_fname)
and (re_run is False)
):
print(str(len(subgrid_pdf1d_fnames)) + " files already merged, skipping")
return pdf1d_fname, stats_fname
# -------------
nsubgrids = len(subgrid_pdf1d_fnames)
if not len(subgrid_stats_fnames) == nsubgrids:
raise AssertionError()
nbins = {}
with fits.open(subgrid_pdf1d_fnames[0]) as hdul_0:
# Get this useful information
qnames = [hdu.name for hdu in hdul_0[1:]]
nbins = {q: hdul_0[q].data.shape[1] for q in qnames}
bincenters = {q: hdul_0[q].data[-1, :] for q in qnames}
nobs = hdul_0[qnames[0]].data.shape[0] - 1
# Check the following bin parameters for each of the other
# subgrids
for pdf1d_f in subgrid_pdf1d_fnames[1:]:
with fits.open(pdf1d_f) as hdul:
for q in qnames:
pdf1d_0 = hdul_0[q].data
pdf1d = hdul[q].data
# the number of bins
if not pdf1d_0.shape[1] == pdf1d.shape[1]:
raise AssertionError()
# the number of stars + 1
if not pdf1d_0.shape[0] == pdf1d.shape[0]:
raise AssertionError()
# the bin centers (stored in the last row of the
# image) should be equal (or both nan)
if not (
np.isnan(pdf1d_0[-1, 0])
and np.isnan(pdf1d[-1, 0])
or (pdf1d_0[-1, :] == pdf1d[-1, :]).all()
):
raise AssertionError()
# Load all the stats files
stats = [Table.read(f, hdu=1) for f in subgrid_stats_fnames]
try:
filters_tab = Table.read(subgrid_stats_fnames[0], hdu=2)
except ValueError:
filters_tab = None
# First, let's read the arrays of weights (each subgrid has an array
# of weights, containing one weight for each source).
logweight = np.zeros((nobs, nsubgrids))
for i, s in enumerate(stats):
logweight[:, i] = s["total_log_norm"]
# Best grid for each star (take max along grid axis)
maxweight_index_per_star = np.argmax(logweight, axis=1)
# Grab the max values, too
max_logweight = logweight[range(len(logweight)), maxweight_index_per_star]
# Get linear weights for each object/grid. By casting the maxima
# into a column shape, the subtraction will be done for each column
# (broadcasted).
weight = np.exp(logweight - max_logweight[:, np.newaxis])
# ------------------------------------------------------------------------
# PDF1D
# ------------------------------------------------------------------------
# We will try to reuse the save function defined in fit.py
save_pdf1d_vals = []
for i, q in enumerate(qnames):
# Prepare the output array
save_pdf1d_vals.append(np.zeros((nobs + 1, nbins[q])))
# Copy the bin centers
save_pdf1d_vals[i][-1, :] = bincenters[q]
# Now, go over all the pdf1d files, and sum the weighted pdf1d values
for g, pdf1d_f in enumerate(subgrid_pdf1d_fnames):
with fits.open(pdf1d_f) as hdul:
for i, q in enumerate(qnames):
pdf1d_g = hdul[q].data[:-1, :]
weight_column = weight[:, [g]] # use [g] to keep dimension
save_pdf1d_vals[i][:-1, :] += pdf1d_g * weight_column
# Normalize all the pdfs of the final result
for i in range(len(save_pdf1d_vals)):
# sum for each source in a column
norms_col = np.sum(save_pdf1d_vals[i][:-1, :], axis=1, keepdims=True)
# non zero mask as 1d array
nonzero = norms_col[:, 0] > 0
save_pdf1d_vals[i][:-1][nonzero, :] /= norms_col[nonzero]
# Save the combined 1dpdf file
save_pdf1d(pdf1d_fname, save_pdf1d_vals, qnames)
# ------------------------------------------------------------------------
# STATS
# ------------------------------------------------------------------------
# Grid with highest Pmax, for each star
pmaxes = np.zeros((nobs, nsubgrids))
for gridnr in range(nsubgrids):
pmaxes[:, gridnr] = stats[gridnr]["Pmax"]
max_pmax_index_per_star = pmaxes.argmax(axis=1)
# Rebuild the stats
stats_dict = {}
for col in stats[0].colnames:
suffix = col.split("_")[-1]
if suffix == "Best":
# For the best values, we take the 'Best' value of the grid
# with the highest Pmax
stats_dict[col] = [
stats[gridnr][col][e]
for e, gridnr in enumerate(max_pmax_index_per_star)
]
elif suffix == "Exp":
# Sum and weigh the expectation values
stats_dict[col] = np.zeros(nobs)
total_weight_per_star = np.zeros(nobs)
for gridnr, s in enumerate(stats):
grid_weight_per_star = weight[:, gridnr]
stats_dict[col] += stats[gridnr][col] * grid_weight_per_star
total_weight_per_star += grid_weight_per_star
stats_dict[col] /= total_weight_per_star
elif re.compile(r"p\d{1,2}$").match(suffix):
# Grab the percentile value
digits = suffix[1:]
p = int(digits)
# Find the correct quantity (the col name without the
# '_'+suffix), and its position in save_pdf1d_vals.
qname = col[: -len(suffix) - 1]
qindex = qnames.index(qname)
# Recalculate the new percentiles from the newly obtained
# 1dpdf. For each star, call the percentile function.
stats_dict[col] = np.zeros(nobs)
for e in range(nobs):
bins = save_pdf1d_vals[qindex][-1]
vals = save_pdf1d_vals[qindex][e]
if vals.max() > 0:
stats_dict[col][e] = percentile(bins, [p], vals)[0]
else:
stats_dict[col][e] = 0
elif col == "chi2min":
# Take the lowest chi2 over all the grids
all_chi2s = np.zeros((nobs, nsubgrids))
for gridnr, s in enumerate(stats):
all_chi2s[:, gridnr] = s[col]
stats_dict[col] = np.amin(all_chi2s, axis=1)
elif col == "Pmax":
all_pmaxs = np.zeros((nobs, nsubgrids))
for gridnr, s in enumerate(stats):
all_pmaxs[:, gridnr] = s[col]
stats_dict[col] = np.amax(all_pmaxs, axis=1)
elif col == "Pmax_indx":
# index of the Pmax (to be useful, must be combined with best_gridsub_tag)
all_pmax_ind = np.zeros((nobs, nsubgrids), dtype=int)
for gridnr, s in enumerate(stats):
all_pmax_ind[:, gridnr] = s[col]
stats_dict[col] = all_pmax_ind[np.arange(nobs), max_pmax_index_per_star]
elif col == "total_log_norm":
stats_dict[col] = np.log(weight.sum(axis=1)) + max_logweight
# For anything else, just copy the values from grid 0. Except
# for the index fields. Those don't make sense when using
# subgrids. They might in the future though. The grid split
# function and some changes to the processesing might help with
# this. Actually specgrid_indx might make sense, since in my
# particular case I'm splitting after the spec grid has been
# created. Still leaving this out though.
elif not col == "chi2min_indx" and not col == "specgrid_indx":
stats_dict[col] = stats[0][col]
# also save the highest Pmax grid number
stats_dict["best_gridsub_tag"] = max_pmax_index_per_star
# save table to a file
ohdu = fits.HDUList()
ohdu.append(fits.table_to_hdu(Table(stats_dict)))
if filters_tab is not None:
ohdu.append(fits.table_to_hdu(filters_tab))
ohdu.writeto(stats_fname, overwrite=True)
print("Saved combined 1dpdfs in " + pdf1d_fname)
print("Saved combined stats in " + stats_fname)
return pdf1d_fname, stats_fname
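# Summary of the combination rule implemented above (interpretation only): per source s
# and subgrid g the weights are w_{s,g} = exp(total_log_norm_{s,g} - max_g total_log_norm_{s,g}),
# the merged 1D PDFs are sum_g w_{s,g} * pdf1d_g, renormalized per source, and 'Best'
# values are taken from the subgrid with the highest Pmax.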
def merge_lnp(
subgrid_lnp_fnames, re_run=False, output_fname_base=None, threshold=None,
):
"""
Merge a set of sparsely sampled log likelihood (lnp) files. It is assumed
that they are for each part of a subgrid, such that a given star_# in each
file corresponds to the same star_# in the other file(s). Note that this
should NOT be used to combine files across source density or background bin.
Parameters
----------
subgrid_lnp_fnames: list of string
file names of all the lnp fits files
re_run: boolean (default=False)
If True, re-run the merging, even if the merged files already
exist. If False, will only merge files if they don't exist.
output_fname_base: string (default=None)
If set, this will prepend the output lnp file name
threshold : float (default=None)
If set: for a given star, any lnP values below max(lnP)-threshold will
be deleted
Returns
-------
merged_lnp_fname : string
file name of the resulting lnp fits file (newly created by this function)
"""
# create filename
if output_fname_base is None:
merged_lnp_fname = "combined_lnp.hd5"
else:
merged_lnp_fname = output_fname_base + "_lnp.hd5"
# check if we need to rerun
if os.path.isfile(merged_lnp_fname) and (re_run is False):
print(str(len(subgrid_lnp_fnames)) + " files already merged, skipping")
return merged_lnp_fname
# dictionaries to compile all the info
merged_lnp = defaultdict(list)
merged_subgrid = defaultdict(list)
merged_idx = defaultdict(list)
for fname in subgrid_lnp_fnames:
# extract subgrid number from filename
subgrid_num = [i for i in fname.split("_") if "gridsub" in i][0][7:]
# read in the SED indices and lnP values
lnp_data = read_beast_data.read_lnp_data(fname, shift_lnp=False)
n_lnp, n_star = lnp_data["vals"].shape
# save each star's values into the master dictionary
for i in range(n_star):
merged_lnp["star_" + str(i)] += lnp_data["vals"][:, i].tolist()
merged_idx["star_" + str(i)] += lnp_data["indxs"][:, i].tolist()
merged_subgrid["star_" + str(i)] += np.full(
n_lnp, int(subgrid_num)
).tolist()
# go through each star and remove values that are too small
if threshold is not None:
# keep track of how long the list of good values is
good_list_len = np.zeros(n_star)
# go through each star
for i in range(n_star):
star_label = "star_" + str(i)
# good indices
keep_ind = np.where(
(np.array(merged_lnp[star_label]) - max(merged_lnp[star_label]))
> threshold
)[0]
good_list_len[i] = len(keep_ind)
# save just those
merged_lnp[star_label] = np.array(merged_lnp[star_label])
# Copyright 2017 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import GPflow
import GPflow.conditionals
import convgp.convkernels as ckern
from GPflow import settings
float_type = GPflow.settings.dtypes.float_type
class WeightedMultiChannelConvGP(ckern.Conv):
def __init__(self, basekern, img_size, patch_size, colour_channels=1):
ckern.Conv.__init__(self, basekern, img_size, patch_size, colour_channels)
self.basekern.input_dim = np.prod(patch_size)
self.W = GPflow.param.Param(np.ones((self.colour_channels, self.num_patches // self.colour_channels)))
import numpy as np
from ordered_set import OrderedSet
from text_selection.kld.kld_iterator import KldIterator
from text_selection.selection import FirstKeySelector
def test_empty_indicies__return_empty_set():
data = np.ones(shape=(4, 3), dtype=np.uint32)
data_indicies = OrderedSet()
preselection = np.zeros(data.shape[1], data.dtype)
iterator = KldIterator(
data=data,
data_indices=data_indicies,
key_selector=FirstKeySelector(),
preselection=preselection,
weights=np.array([1, 1, 1], dtype=np.uint32),
)
result = OrderedSet(iterator)
assert result == OrderedSet()
def test_all_equal_returns_all_in_same_key_order():
data = np.ones(shape=(4, 3), dtype=np.uint32)
data_indicies = OrderedSet((0, 2, 1, 3))
preselection = np.zeros(data.shape[1], data.dtype)
iterator = KldIterator(
data=data,
data_indices=data_indicies,
key_selector=FirstKeySelector(),
preselection=preselection,
weights=np.array([1, 1, 1], dtype=np.uint32),
)
result = OrderedSet(iterator)
assert result == OrderedSet((0, 2, 1, 3))
def test_select_zeros_not_first():
data = np.array(
[
[1, 0, 1], # 0
[0, 0, 1], # 4
[0, 1, 0], # 1
[0, 0, 0], # 2
[0, 0, 0], # 3
],
dtype=np.uint32,
)
data_indicies = OrderedSet((0, 1, 2, 3, 4))
preselection = np.zeros(data.shape[1], data.dtype)
iterator = KldIterator(
data=data,
data_indices=data_indicies,
key_selector=FirstKeySelector(),
preselection=preselection,
weights=np.array([1, 1, 1], dtype=np.uint32),
)
result = OrderedSet(iterator)
assert result == OrderedSet((0, 2, 3, 4, 1))
def test_empty_ngrams__returns_same_input_order():
data = np.empty(shape=(5, 0), dtype=np.uint32)
import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
err = np.seterr(invalid="ignore", divide="ignore")
try:
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
finally:
np.seterr(**err)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x=np.array([1,2,3], np.int16)
assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = | np.array(y, dtype=dt) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
def boot(data, statistic, R):
from numpy import zeros, arange, mean, std, loadtxt
from numpy.random import randint
from time import time
t = zeros(R); n = len(data); inds = arange(n); t0 = time()
# non-parametric bootstrap
for i in range(R):
t[i] = statistic(data[randint(0,n,n)])
return t
def tsboot(data,statistic,R,l):
from numpy import std, mean, concatenate, arange, loadtxt, zeros, ceil
from numpy.random import randint
from time import time
t = zeros(R); n = len(data); k = int(ceil(float(n)/l));
inds = arange(n); t0 = time()
# time series bootstrap
for i in range(R):
# construct bootstrap sample from
# k chunks of data. The chunksize is l
_data = concatenate([data[j:j+l] for j in randint(0,n-l,k)])[0:n];
t[i] = statistic(_data)
return t
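# Minimal usage sketch (added, not part of the original script): the synthetic data,
# the choice of np.mean as the statistic, R=500 resamples and block length l=50 are
# assumptions used only to illustrate how boot() and tsboot() are called.
def _demo_boot():
    data = np.random.randn(1000)
    t = boot(data, np.mean, 500)            # ordinary non-parametric bootstrap
    t_ts = tsboot(data, np.mean, 500, 50)   # time-series (block) bootstrap
    return t.mean(), t.std(), t_ts.std()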
def block(x):
from numpy import log2, zeros, mean, var, sum, loadtxt, arange, \
array, cumsum, dot, transpose, diagonal, floor
from numpy.linalg import inv
# preliminaries
d = log2(len(x))
if (d - floor(d) != 0):
print("Warning: Data size = %g, is not a power of 2." % floor(2**d))
print("Truncating data to %g." % 2**floor(d) )
x = x[:2**int(floor(d))]
d = int(floor(d))
n = 2**d
s, gamma = zeros(d), zeros(d)
mu = | mean(x) | numpy.mean |
## Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import tensorflow as tf
import numpy as np
import os
import utils
import time
from data import *
current_milli_time = lambda: int(round(time.time() * 1000))
BN_EPSILON = 0.001
class Cnn_Dgp(object):
# image_size: [1, 3] int: image_size[0]: height, image_size[1]: width, image_size[2]: depth
# d_out: int: number of class
# n: int: number of residual block ==> number of layer for resnet will be 6n + 2
def __init__(self, feature_extractor, dgp_extractor, image_size, d_out, total_samples, mc_test):
# Define resnet structure and its parameters
self.feature_extractor = feature_extractor
self.dgp_extractor = dgp_extractor
self.image_height = image_size[0]
self.image_width = image_size[1]
self.image_depth = image_size[2]
self.d_out = d_out
self.mc_train = 1
self.mc_test = mc_test
# Define placeholders
self.x = tf.placeholder(tf.float32, shape=[None, self.image_height, self.image_width, self.image_depth])
self.y = tf.placeholder(tf.int32, shape=[None, 1])
self.one_hot = tf.one_hot(tf.reshape(self.y, [-1]), depth=self.d_out, on_value=1.0, off_value=0.0, axis=-1)
self.lr = tf.placeholder(tf.float32, shape=[])
self.total_samples = total_samples
# Obtain real batch size
self.real_batch_size = tf.shape(self.x)[0]
# Define logits and predicted probabilities
self.logits_train = tf.reshape(self.inference(mc=self.mc_train), [self.mc_train, self.real_batch_size, self.d_out]) #[mc_train, batch_size, nbclass]
self.logits_test = tf.reshape(self.inference(mc=self.mc_test), [self.mc_test, self.real_batch_size, self.d_out]) #[mc_test, batch_size, nbclass]
self.pred_probs_train = tf.reduce_mean(tf.nn.softmax(self.logits_train, -1), axis=0) #[batch_size, nbclass]
self.pred_probs_test = tf.reduce_mean(tf.nn.softmax(self.logits_test, -1), axis=0) #[batch_size, nbclass]
# Define loss function
self.loss_train = self.compute_loss_train()
self.mnll_test = self.compute_mnll_test()
self.regu_loss = self.compute_regu_loss()
self.full_loss_train = self.loss_train + self.regu_loss
self.full_loss_test = self.mnll_test + self.regu_loss
# Session
self.session = tf.Session()
self.all_variables = tf.trainable_variables()
self.conv_filters = self.feature_extractor.get_conv_params()
self.omega = self.dgp_extractor.get_omegas()
self.w = self.dgp_extractor.get_w()
self.log_theta_lengthscale, self.log_theta_sigma2 = self.dgp_extractor.get_kernel_param()
# Define the train steps
self.global_step = tf.Variable(0, trainable=False)
self.top1_error_train = self.top_1_error(self.pred_probs_train)
self.top1_error_test = self.top_1_error(self.pred_probs_test)
self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.full_loss_train)
# Saver
self.saver = tf.train.Saver()
def inference_1mc(self):
features = self.feature_extractor.feed_forward(self.x)
output = self.dgp_extractor.feed_forward(features)
return output
def inference(self, mc):
layer_out_mc = self.inference_1mc()
for i in range(mc - 1):
layer_out_mc = tf.concat([layer_out_mc, self.inference_1mc()], axis = 0)
return layer_out_mc
def compute_mnll_test(self):
mnll_test = - tf.reduce_mean(tf.log(tf.reduce_sum(tf.multiply(self.pred_probs_test, self.one_hot), axis=1)))
return mnll_test
## logits: 3d: [mc, batch_size, d_out]
def compute_loss_train(self):
ll = tf.reduce_sum(self.one_hot * self.logits_train, 2) - utils.logsumexp(self.logits_train, 2) #[mc_test, batch_size]
loss_train = -tf.reduce_mean(ll)
return loss_train
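    # Added annotation (not in the original): the expression in compute_loss_train is the
    # log-softmax evaluated at the true class, i.e. sum(one_hot * logits) - logsumexp(logits)
    # equals log p(y | x) for each Monte Carlo sample, so loss_train is the negative
    # log-likelihood averaged over the MC samples and the batch.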
def compute_regu_loss(self):
regu_loss_cnn = self.feature_extractor.get_regu_loss()
regu_loss_dgp = self.dgp_extractor.get_regu_loss()
regu_loss = 1.0 / self.total_samples * (regu_loss_cnn + regu_loss_dgp)
return regu_loss
def top_1_error(self, pred_probs):
batch_size = tf.cast(tf.shape(pred_probs)[0], tf.float32)
in_top1 = tf.to_float(tf.nn.in_top_k(pred_probs, tf.reshape(self.y, [-1]), k=1))
num_correct = tf.reduce_sum(in_top1)
return (batch_size - num_correct) / batch_size
def predict(self):
#err = self.top1_error
#one_hot = tf.one_hot(tf.reshape(self.y, [-1]), depth=self.d_out, on_value=1.0, off_value=0.0, axis=-1)
#mnll = - tf.reduce_mean(tf.log(tf.reduce_sum(tf.multiply(self.pred_probs, one_hot), axis=1)))
#return self.pred_probs_test, self.top1_error_test, self.loss_test, self.regu_loss, self.full_loss_test, self.pred_probs_train, self.loss_train, self.full_loss_train
return self.pred_probs_test, self.top1_error_test, self.mnll_test, self.regu_loss, self.full_loss_test
def evaluate(self, testX, testY, test_batch_size, num_bins):
[PRED_PROBS, ERR, MNLL, REGU_LOSS, FULL_LOSS] = self.session.run(self.predict(), feed_dict={self.x: testX[0:test_batch_size], self.y: testY[0:test_batch_size]})
if (test_batch_size < len(testY)):
nb_batch_test = (int)(len(testY) / test_batch_size)
for id_batch in range(nb_batch_test - 1):
[PRED_PROBS_1, ERR_1, MNLL_1, REGU_LOSS_1, FULL_LOSS_1] = self.session.run(self.predict(), \
feed_dict={self.x: testX[(id_batch + 1) * test_batch_size : \
(id_batch + 2) * test_batch_size], \
self.y: testY[(id_batch + 1) * test_batch_size : \
(id_batch + 2) * test_batch_size]})
PRED_PROBS = np.concatenate((PRED_PROBS, PRED_PROBS_1), axis=0)
ERR = ERR + ERR_1
MNLL = MNLL + MNLL_1
REGU_LOSS = REGU_LOSS_1
FULL_LOSS = FULL_LOSS + FULL_LOSS_1
ERR = ERR / nb_batch_test
MNLL = MNLL / nb_batch_test
FULL_LOSS = FULL_LOSS / nb_batch_test
# compute ece and mce
predicted_probs = np.amax(PRED_PROBS, axis=1)
true_false = np.reshape(( | np.argmax(PRED_PROBS, 1) | numpy.argmax |
import warnings
from nose.tools import (assert_equal, assert_greater, assert_in, assert_true,
assert_less, assert_is)
from bluesky.callbacks import collector, CallbackCounter
from bluesky.scans import (AbsListScan, AbsScan, LogAbsScan,
DeltaListScan, DeltaScan, LogDeltaScan,
AdaptiveAbsScan, AdaptiveDeltaScan, Count, Center,
OuterProductAbsScan, InnerProductAbsScan,
OuterProductDeltaScan, InnerProductDeltaScan)
from bluesky.standard_config import ascan, dscan, ct
from bluesky import Msg
from bluesky.examples import motor, det, SynGauss, motor1, motor2
from bluesky.tests.utils import setup_test_run_engine
import asyncio
import time as ttime
import numpy as np
loop = asyncio.get_event_loop()
RE = setup_test_run_engine()
def traj_checker(scan, expected_traj):
actual_traj = []
callback = collector('motor', actual_traj)
RE(scan, subs={'event': callback})
assert_equal(actual_traj, list(expected_traj))
def multi_traj_checker(scan, expected_data):
actual_data = []
def collect_data(name, event):
actual_data.append(event['data'])
RE(scan, subs={'event': collect_data})
assert_equal(actual_data, expected_data)
def test_outer_product_ascan():
motor.set(0)
scan = OuterProductAbsScan([det], motor1, 1, 3, 3, motor2, 10, 20, 2,
False)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
expected_data = [
{'motor2': 10.0, 'det': 1.0, 'motor1': 1.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 1.0},
{'motor2': 10.0, 'det': 1.0, 'motor1': 2.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 2.0},
{'motor2': 10.0, 'det': 1.0, 'motor1': 3.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 3.0}]
yield multi_traj_checker, scan, expected_data
def test_outer_product_ascan_snaked():
motor.set(0)
scan = OuterProductAbsScan([det], motor1, 1, 3, 3, motor2, 10, 20, 2, True)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
expected_data = [
{'motor2': 10.0, 'det': 1.0, 'motor1': 1.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 1.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 2.0},
{'motor2': 10.0, 'det': 1.0, 'motor1': 2.0},
{'motor2': 10.0, 'det': 1.0, 'motor1': 3.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 3.0}]
yield multi_traj_checker, scan, expected_data
def test_inner_product_ascan():
motor.set(0)
scan = InnerProductAbsScan([det], 3, motor1, 1, 3, motor2, 10, 30)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
expected_data = [
{'motor2': 10.0, 'det': 1.0, 'motor1': 1.0},
{'motor2': 20.0, 'det': 1.0, 'motor1': 2.0},
{'motor2': 30.0, 'det': 1.0, 'motor1': 3.0}]
yield multi_traj_checker, scan, expected_data
def test_outer_product_dscan():
scan = OuterProductDeltaScan([det], motor1, 1, 3, 3, motor2, 10, 20, 2,
False)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
motor.set(0)
motor1.set(5)
motor2.set(8)
expected_data = [
{'motor2': 18.0, 'det': 1.0, 'motor1': 6.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 6.0},
{'motor2': 18.0, 'det': 1.0, 'motor1': 7.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 7.0},
{'motor2': 18.0, 'det': 1.0, 'motor1': 8.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 8.0}]
yield multi_traj_checker, scan, expected_data
def test_outer_product_dscan_snaked():
scan = OuterProductDeltaScan([det], motor1, 1, 3, 3, motor2, 10, 20, 2,
True)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
motor.set(0)
motor1.set(5)
motor2.set(8)
expected_data = [
{'motor2': 18.0, 'det': 1.0, 'motor1': 6.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 6.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 7.0},
{'motor2': 18.0, 'det': 1.0, 'motor1': 7.0},
{'motor2': 18.0, 'det': 1.0, 'motor1': 8.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 8.0}]
yield multi_traj_checker, scan, expected_data
def test_inner_product_dscan():
motor.set(0)
motor1.set(5)
motor2.set(8)
scan = InnerProductDeltaScan([det], 3, motor1, 1, 3, motor2, 10, 30)
# Note: motor1 is the first motor specified, and so it is the "slow"
# axis, matching the numpy convention.
expected_data = [
{'motor2': 18.0, 'det': 1.0, 'motor1': 6.0},
{'motor2': 28.0, 'det': 1.0, 'motor1': 7.0},
{'motor2': 38.0, 'det': 1.0, 'motor1': 8.0}]
yield multi_traj_checker, scan, expected_data
def test_ascan():
traj = [1, 2, 3]
scan = AbsListScan([det], motor, traj)
yield traj_checker, scan, traj
def test_dscan():
traj = np.array([1, 2, 3])
motor.set(-4)
scan = DeltaListScan([det], motor, traj)
yield traj_checker, scan, traj - 4
def test_lin_ascan():
traj = np.linspace(0, 10, 5)
scan = AbsScan([det], motor, 0, 10, 5)
yield traj_checker, scan, traj
def test_log_ascan():
traj = np.logspace(0, 10, 5)
scan = LogAbsScan([det], motor, 0, 10, 5)
yield traj_checker, scan, traj
def test_lin_dscan():
traj = | np.linspace(0, 10, 5) | numpy.linspace |
"""
Rewired Elementary Cellular Automata
====================================
The :class:`neet.automata.reca.RewiredECA` implements a variant of an ECA
wherein the neighbors of a give cell can be specified by the user. This
allows one to study, for example, the role of topology in the dynamics of a
network. Every ``ECA`` can be represented as a ``RewiredECA`` with standard
wiring, but all ``RewiredECA`` are *fixed sized* networks.
.. rubric:: Examples
.. doctest:: automata
>>> ca = RewiredECA(30, size=3)
>>> ca.update([0, 1, 0])
[1, 1, 1]
>>> ca = RewiredECA(30, wiring=[[0,1,3], [1,1,1], [2,1,2]])
>>> ca.update([0, 1, 0])
[1, 0, 1]
"""
import numpy as np
from .network import BooleanNetwork
class RewiredECA(BooleanNetwork):
"""
RewiredECA is a class to represent elementary cellular automata rules with
arbitrarily defined topology. Since the topology must be provided,
"""
def __init__(self, code, boundary=None, size=None, wiring=None):
"""
Construct a rewired elementary cellular automaton rule.
.. rubric:: Examples
.. doctest:: automata
>>> reca = RewiredECA(30, size=3)
>>> reca.code
30
>>> reca.size
3
>>> reca.wiring
array([[-1, 0, 1],
[ 0, 1, 2],
[ 1, 2, 3]])
.. doctest:: automata
>>> reca = RewiredECA(30, wiring=[[0,1,2],[-1,0,0],[2,3,1]])
>>> reca.code
30
>>> reca.size
3
>>> reca.wiring
array([[ 0, 1, 2],
[-1, 0, 0],
[ 2, 3, 1]])
:param code: the 8-bit Wolfram code for the rule
:type code: int
:param boundary: the boundary conditions for the CA
:type boundary: tuple or None
:param size: the number of cells in the lattice
:type size: int or None
:param wiring: a wiring matrix
:raises ValueError: if ``size is None and wiring is None``
:raises ValueError: if ``size is not None and wiring is not None``
:raises TypeError: if ``size is not None and not
isinstance(size, int)``
:raises ValueError: if ``size is not None and size <= 0``
:raises TypeError: if ``not isinstance(wiring, list) and not
isinstance(wiring, numpy.ndarray)``
        :raises ValueError: if ``wiring`` is not :math:`3 \\times N`
:raises ValueError: if ``any(wiring < -1) or any(wiring > N)``
"""
if size is not None and wiring is not None:
raise ValueError("cannot provide size and wiring at the same time")
elif size is not None:
super(RewiredECA, self).__init__(size)
self.code = code
self.boundary = boundary
self.__wiring = np.zeros((3, size), dtype=int)
self.__wiring[0, :] = range(-1, size - 1)
self.__wiring[1, :] = range(0, size)
self.__wiring[2, :] = range(1, size + 1)
elif wiring is not None:
if not isinstance(wiring, (list, np.ndarray)):
raise TypeError("wiring must be a list or an array")
wiring_array = np.copy(wiring)
shape = wiring_array.shape
if wiring_array.ndim != 2:
raise ValueError("wiring must be a matrix")
elif shape[0] != 3:
raise ValueError("wiring must have 3 rows")
elif np.any(wiring_array < -1):
raise ValueError("invalid input node in wiring")
elif np.any(wiring_array > shape[1]):
raise ValueError("invalid input node in wiring")
super(RewiredECA, self).__init__(int(shape[1]))
self.code = code
self.boundary = boundary
self.__wiring = wiring_array
else:
raise ValueError("either size or wiring must be provided")
@property
def code(self):
"""
The Wolfram code of the elementary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30, 5)
>>> eca.code
30
>>> eca.code = 45
>>> eca.code
45
>>> eca.code = 256
Traceback (most recent call last):
...
ValueError: invalid ECA code
:type: int
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
"""
return self.__code
@code.setter
def code(self, code):
if not isinstance(code, int):
raise TypeError("ECA code is not an int")
if 255 < code or code < 0:
raise ValueError("invalid ECA code")
self.__code = code
@property
def boundary(self):
"""
The boundary conditions of the elemenary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30)
>>> eca.boundary
>>> eca.boundary = (0,1)
>>> eca.boundary
(0, 1)
>>> eca.boundary = None
>>> eca.boundary
>>> eca.boundary = [0,1]
Traceback (most recent call last):
...
TypeError: ECA boundary are neither None nor a tuple
:type: ``None`` or tuple
:raises TypeError: if ``boundary`` is neither ``None`` or an instance of tuple
:raises ValueError: if ``boundary`` is a neither ``None`` or a pair of binary states
"""
return self.__boundary
@boundary.setter
def boundary(self, boundary):
if boundary and not isinstance(boundary, tuple):
raise TypeError("ECA boundary are neither None nor a tuple")
if boundary:
if len(boundary) != 2:
raise ValueError("invalid ECA boundary conditions")
for x in boundary:
if x != 0 and x != 1:
raise ValueError("invalid ECA boundary value")
self.__boundary = boundary
@property
def wiring(self):
"""
The wiring matrix for the rule.
.. rubric:: Examples
.. doctest:: automata
>>> reca = RewiredECA(30, size=3)
>>> reca.wiring
array([[-1, 0, 1],
[ 0, 1, 2],
[ 1, 2, 3]])
>>> eca = RewiredECA(30, wiring=[[0,1],[1,1],[-1,-1]])
>>> eca.wiring
array([[ 0, 1],
[ 1, 1],
[-1, -1]])
:type: ``numpy.ndarray``
"""
return self.__wiring
def _unsafe_update(self, lattice, index=None, pin=None, values=None):
"""
Update the state of the ``lattice``, in place, without
checking the validity of the arguments.
:param lattice: the one-dimensional sequence of states
:param index: the index to update (optional)
:param pin: a sequence of indices to fix (optional)
:param values: a dict of index/value pairs to set (optional)
:returns: the updated lattice
"""
pin_states = pin is not None and pin != []
if self.boundary:
left = self.boundary[0]
right = self.boundary[1]
else:
left = lattice[-1]
right = lattice[0]
code = self.code
wiring = self.wiring
size = len(lattice)
if index is None:
if pin_states:
pinned = | np.asarray(lattice) | numpy.asarray |
import numpy as np
from pyHalo.Rendering.MassFunctions.mass_function_utilities import integrate_power_law_analytic
from pyHalo.Rendering.MassFunctions.mass_function_utilities import WDM_suppression
class GeneralPowerLaw(object):
"""
This class handles computations of a double power law mass function of the form
dn/dm = m^x * (1 + (a * m_c / m)^b)^c
where a, b, and c are constants, and m_c is a characteristic mass scale.
The keywords for a, b, c are a_wdm, b_wdm, and c_wdm, respectively
Lovell 2020 fit this mass function to simulations of Warm Dark Matter cosmologies and find
(a, b, c) = (2.3, 0.8, -1) for central halos and (4.2, 2.5, -0.2) for subhalos
"""
def __init__(self, log_mlow, log_mhigh, power_law_index, draw_poisson, normalization,
log_mc, a_wdm, b_wdm, c_wdm):
if a_wdm is None:
assert b_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
assert c_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
else:
assert b_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
assert c_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
if b_wdm is None:
assert a_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
assert c_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
else:
assert a_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
assert c_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
if c_wdm is None:
assert a_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
assert b_wdm is None, 'If one of a_wdm, b_wdm, or c_wdm is not specified (None), all parameters must be None'
else:
assert a_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
assert b_wdm is not None, 'Must specify values for all three of a_wdm, b_wdm, c_wdm'
if normalization < 0:
raise Exception('normalization cannot be < 0.')
if c_wdm is not None and c_wdm > 0:
raise ValueError('c_wdm should be a negative number (otherwise mass function gets steeper (unphysical)')
if a_wdm is not None and a_wdm < 0:
raise ValueError('a_wdm should be a positive number for suppression factor: '
'( 1 + (a_wdm * m/m_c)^b_wdm)^c_wdm')
if np.any([a_wdm is None, b_wdm is None, c_wdm is None]):
assert log_mc is None, 'If log_mc is specified, must also specify kwargs for a_wdm, b_wdm, c_wdm.' \
'(See documentation in pyHalo/Rendering/MassFunctions/Powerlaw/broken_powerlaw'
self._log_mc = log_mc
self._a_wdm = a_wdm
self._b_wdm = b_wdm
self._c_wdm = c_wdm
self.draw_poisson = draw_poisson
self._index = power_law_index
self._mL = 10 ** log_mlow
self._mH = 10 ** log_mhigh
self._nhalos_mean_unbroken = integrate_power_law_analytic(normalization, 10 ** log_mlow, 10 ** log_mhigh, 0,
power_law_index)
def draw(self):
"""
Draws samples from a double power law distribution between mL and mH of the form
m ^ power_law_index * (1 + (a*mc / m)^b )^c
Physically, the second term multiplying m^power_law_index can be a suppression in the mass function on small
scales.
:param draw_poisson:
:param _index:
:param _mH:
:param _mL:
:param n_draw:
:return:
"""
m = self._sample(self.draw_poisson, self._index, self._mH, self._mL, self._nhalos_mean_unbroken)
if len(m) == 0 or self._log_mc is None:
return m
factor = WDM_suppression(m, 10 ** self._log_mc, self._a_wdm, self._b_wdm, self._c_wdm)
u = np.random.rand(int(len(m)))
inds = np.where(u < factor)
return m[inds]
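    # Added annotation (not in the original): the suppression factor returned by
    # WDM_suppression lies in [0, 1], so the masses drawn from the unsuppressed power law
    # are thinned by rejection sampling (a halo is kept while u < factor), which reproduces
    # dn/dm = m^x * (1 + (a * m_c / m)^b)^c on average.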
def _sample(self, draw_poisson, index, mH, mL, n_draw):
"""
Draws samples from a power law distribution between mL and mH
:param draw_poisson:
:param _index:
:param _mH:
:param _mL:
:param n_draw:
:return:
"""
if draw_poisson:
N = np.random.poisson(n_draw)
else:
            N = int(np.round(n_draw))
x = np.random.rand(N)
if index == -1:
norm = | np.log(mH / mL) | numpy.log |
import math
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
from src.utils import checker, check_range, convert_to_absolute, create_circular_mask, skew, swap_patches, create_grid_masks
class Augmentor(object):
"""
    Modify images according to a set of augmenting functions. The input is a dictionary with a set of options: the keys are
    the operations and the values are the ranges or types for each operation. Alternatively, the value can itself be a
    dictionary whose 'values' key holds those parameters. This allows passing other data such as 'probability', which
    specifies the probability of applying the operation; otherwise it is set to 1.
Some methods are rewritten from
https://github.com/mdbloice/Augmentor/blob/master/Augmentor/Operations.py
"""
def __init__(self, operations, std_noise=50, seed=None):
'''
        Use seed if you want to apply the same transformation to different groups of images. For instance, when images and
        masks need to be processed.
        :param operations: This is a dictionary with all the operations. The keys are the operations and the values are the parameters.
Possible keys and the expected value
                - brightness: (min_value, max_value) The values for brightness must be between 0.05 and 10
- color_balance: (min_value, max_value) color_balance must be between 0 and 10
- contrast: (min_value, max_value) contrast must be between 0 and 10
- flip: 'horizontal' or 'hor', 'vertical' or 'ver', both
- greyscale: []
- grid_mask: (min_x_pos, max_x_pos, min_y_pos, max_y_pos, min_width_square, max_width_square,
                min_height_square, max_height_square, min_x_distance_between_squares,
max_x_distance_between_squares, min_y_distance_between_squares, max_y_distance_between_squares)
                Values must be between 0 and 1 since they are relative to the size of the image.
                Generally, the initial position should be similar to the distance between squares.
                This type of augmentation can be used two or three times with different parameters, since it is
                good to have a variety of grids without covering too much of the image.
- illumination: (min_radius, max_radius, min_magnitude, max_magnitude) -- standard (0.05, 0.1, 100, 200)
                - noise: (min_sigma, max_sigma) -- gaussian noise with mean 0
- occlusion: (type, min_height, max_height, min_width, max_width) - creates a box of noise to block the image.
The types are hide_and_seek and cutout so far. As extra parameter accepts 'num_patches' which can be a number or a range
By default is 1.
                - posterisation: (min_number_levels, max_number_levels) Reduce the number of intensity levels of the image. The image
                is assumed to have 255 levels (at least it is returned in that way); this operation reduces it to fewer levels than 255.
                - rgb swapping: True or False. This operation swaps the RGB channels randomly
- rotation: (min angle, max angle) - in degrees
                - sharpness: (min_value, max_value) - The values must be between -5 and 5; a value of 1 (not 0) leaves the image unchanged.
- shear: (type, magnitude_min, magnitude_max) types are "random", 'hor', 'ver'. The magnitude are the angles to shear in degrees
- skew: (type, magnitude_min, magnitude_max), where types are: "TILT", "TILT_LEFT_RIGHT", "TILT_TOP_BOTTOM", "CORNER", "RANDOM", "ALL"
- solarise: [] doing a solarisation (max(image) - image)
- translate: (min_x, max_x, min_y, max_y) values are relative to the size of the image (0, 0.1, 0, 0.1)
- whitening: (min_alpha, max_alpha) -- new image is alpha*white_image + (1-alpha) * image
- zoom: (min value, max value) - the values are relative to the current size. So, 1 is the real size image (standard 0.9, 1.1)
                Apart from this, the value can be a dictionary of the form {'values': [values], 'probability': 1, special_parameter: VALUE}
                The probability is the chance of applying this operation, and the special parameters are indicated in the descriptions above where they apply.
:param std_noise: The standard deviation to add to the noise, when using translation, zoom, occlusion, etc. By default: 50
:param seed: A seed to initiate numpy random seed in case of need. By default, None
'''
self.perform_checker = True
self.seed = seed
self._operations = operations
self._std_noise = std_noise
self.skew_types = ["TILT", "TILT_LEFT_RIGHT", "TILT_TOP_BOTTOM", "CORNER", "RANDOM", "ALL"]
self.flip_types = ['VERTICAL', 'VER', 'HORIZONTAL', 'HOR', 'RANDOM', 'ALL']
self.occlusion_types = ['hide_and_seek', 'cutout']
self.illumination_types = ['blob_positive', 'blob_negative', 'blob', 'constant_positive', 'constant_negative',
'constant', 'positive', 'negative', 'all', 'random']
self.initial_prob = {'flip': 0.5, 'solarise': 0.5, 'greyscale': 0.5, 'rgb_swapping': 0.5}
self.numpy_fun = ['blur', 'grid_mask', 'illumination', 'noise', 'occlusion', 'posterisation',
'rgb_swapping', 'sample_pairing', 'translate']
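    # Added usage sketch (not from the original code; the keys and value ranges are
    # assumptions based on the docstring above):
    #
    #   augmentor = Augmentor({
    #       'brightness': (0.7, 1.3),
    #       'flip': {'values': 'horizontal', 'probability': 0.5},
    #       'noise': {'values': (1, 10), 'probability': 0.3},
    #       'zoom': (0.9, 1.1),
    #   })
    #   augmented_images = augmentor.run(list_of_numpy_images)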
@property
def operations(self):
return self._operations
@operations.setter
def operations(self, operations):
self._operations = operations
self.perform_checker = True
def rescale(self, im):
"""
Rescale an image between 0 and 255
:param im (array): An image or set of images
"""
if np.max(im) == np.min(im):
return (im * 0).astype(np.uint8)
return (255 * (im.astype(np.float) - np.min(im)) / (np.max(im) - np.min(im))).astype(np.uint8)
def run(self, images, **kwargs):
"""
Perform the augmentation on the images
:param images(numpy arrays): A
:param kwargs: (TODO) Extra information to pass to the transformation
:return: The augmented image/s
"""
if self.seed is not None:
np.random.seed(self.seed)
if len(images) == 0:
return images
new_operations = {'numpy': {}, 'pil': {}}
for operation, values in self.operations.items():
# Remove digits from operation
operation2 = ''.join([i for i in operation if not i.isdigit()])
if operation2 in self.numpy_fun:
new_operations['numpy'][operation] = values
else:
new_operations['pil'][operation] = values
get_first_value = False
if not isinstance(images, (set, tuple, list)):
get_first_value = True
images = [images]
channels = []
for image in images:
channel = 0
if len(image.shape) == 3:
channel = image.shape[2]
channels.append(channel)
if not isinstance(image, np.ndarray):
raise TypeError('Images must be of type ndarray')
norm = lambda x: np.squeeze(np.uint8(x)) if np.max(x) > 1.0 else np.uint8(x * 255)
pil_obj = [Image.fromarray(norm(image)) for image in images]
# If some extra data gets modified, like the labels of the images, then it must be returned. The output would be
# a dictionary with the images in the key images and the other parameters with the same name as in the input.
output_extra = {}
for type_data in ['pil', 'numpy']:
operations = new_operations[type_data]
if type_data == 'numpy':
output = []
for i, image in enumerate(pil_obj):
# The number of channels must be preserved, operations with PIL like greyscale or greyscale images
# would removed the dimension and leave a 2D image
channel = channels[i]
image = np.array(image)
if len(image.shape) == 2 and channel == 1:
image = image[..., None]
elif (len(image.shape) == 2 and channel == 3) or (
len(image.shape) == 3 and image.shape[2] == 1 and channel == 3):
image = np.dstack([image, image, image])
output.append(image)
for operation, values in operations.items():
if not isinstance(values, dict):
input_data = {'values': values}
else:
input_data = values
probability = input_data.get('probability')
probability = probability if probability else input_data.get('prob',
self.initial_prob.get(operation, 1.0))
extra_data = {key: value for key, value in input_data.items() if
key not in ['values', 'probability', 'prob']}
for key, val in kwargs.items():
extra_data[key] = val
if np.random.rand(1)[0] < probability:
# Remove digits from operation
operation = ''.join([i for i in operation if not i.isdigit()])
op = getattr(self, operation, None)
if op is not None:
if type_data == 'pil':
pil_obj = op(pil_obj, input_data.get('values', []), **extra_data)
else:
output = op(output, input_data.get('values', []), **extra_data)
# If the operation returns a dictionary, means that a parameters from extra_data has been
# modified, when this happens, extra_data must be updated for future calls and the final
# output should include the modified values.
if isinstance(output, dict):
copy_dict = output
output = copy_dict['images']
for key, value in copy_dict.items():
if key != 'images':
output_extra[key] = value
extra_data[key] = value
else:
print('The operation {} does not exist, Aborting'.format(operation))
# output = [np.array(image) for image in pil_obj]
if get_first_value:
output = output[0]
if output_extra:
output_extra['images'] = output
output = output_extra
return output
def check_images_equal_size(self, images):
"""
Check that all the images have the same size
:param images: A list of images
:return: True if all images have same size otherwise False
"""
get_size = lambda x: x.size
if isinstance(images[0], np.ndarray):
get_size = lambda x: x.shape[:2]
h, w = get_size(images[0])
for image in images:
h1, w1 = get_size(image)
if h1 != h or w1 != w:
return False
return True
def check_groups_images_equal_size(self, images, images2):
"""
Check that all the images in images and images2 has the same size. It assumes that both images and images2
have at least one image
:param images: A list of images (numpy array or PIL)
:param images2: A list of images (numpy or PIL)
"""
output = True
output = output and self.check_images_equal_size(images)
output = output and self.check_images_equal_size(images2)
return output and self.check_images_equal_size([np.array(images[0]), np.array(images2[0])])
def brightness(self, images, values, **kwargs):
"""
        Change the brightness of the image: factors smaller than 1 will make the image darker and a factor greater than 1
        will make it brighter. 0 means completely black and it should be avoided.
        :param images(list or array): A list or array of images, each being 3D
        :param values: 2 values. The minimum and maximum change in brightness; the values must be between 0.05 and 10,
        beyond those points the results are too dark or too bright, respectively.
:return: A list with the images with some brightness change
"""
factor = checker('brightness', 'range of brightness', values, 2, 0.05, 10)
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
output = []
for i, image in enumerate(images):
if no_mask_positions[i]:
image_enhancers = ImageEnhance.Brightness(image)
output.append(image_enhancers.enhance(factor))
else:
output.append(image)
return output
def color_balance(self, images, values, **kwargs):
"""
        Change the colour balance (saturation) of the image: factors smaller than 1 reduce the saturation and a factor
        greater than 1 enhances it. 0 gives a black-and-white image.
:param images: A list of numpy arrays, each being an image
:param values: A list with 2 values. Minimum value for colour balance (greater than 0)
Maximum value for colour balance (smaller than 10)
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
:return: A list of images with changed colour
"""
        factor = checker('color_balance', 'range of color_balance', values, 2, 0, 10)
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
output = []
for i, image in enumerate(images):
if no_mask_positions[i]:
image_enhancers = ImageEnhance.Color(image)
output.append(image_enhancers.enhance(factor))
else:
output.append(image)
return output
def contrast(self, images, values, **kwargs):
"""
        Change the contrast of the image: factors smaller than 1 reduce the contrast (0 gives a solid grey image) and a
        factor greater than 1 increases it.
:param images: A list of numpy arrays, each being an image
:param values: A list with 2 values. Minimum value for contrast balance (greater than 0)
Maximum value for contrast balance (smaller than 10)
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
:return: A list of image with changed contrast
"""
factor = checker('contrast', 'range of contrast', values, 2, 0, 10)
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
output = []
for i, image in enumerate(images):
if no_mask_positions[i]:
image_enhancers = ImageEnhance.Contrast(image)
output.append(image_enhancers.enhance(factor))
else:
output.append(image)
return output
def crop(self, images, values, **kwargs):
"""
Perform a crop of the images. The main difference with zoom is that zoom creates a crop from the center of the
image and respects the dimension of the images, in addition it may increase the size of the image.
This operation selects a random position in the image and extract a patch with a random height and width. Both
the height and the width can be restricted to a given range.
:param images: A list of numpy arrays, each being an image
:param values: 4 values: Minimum height of the crop (or both height and width if there are only two values)
Maximum height of the crop (or both height and width if there are only 2 values).
Minimum width of the crop (optional)
Maximum width of the crop (optional)
All the values must be between 0 and 1, meaning a relative crop with respect to
the size of the image.
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
:return:
"""
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
output = []
name = 'crop'
if not self.check_images_equal_size(images):
print('For {}, the size of the images must be the same. Aborting'.format(name))
return images
shape = images[0].size
name_params = ['height', 'width']
for i in range(len(values) // 2):
check_range(name, name_params[i], values[i * 2:(i + 1) * 2], 0, 1)
if len(values) != 2 and len(values) != 4:
raise ValueError('The length of values in crop must be 2 or 4')
if len(values) == 4:
cropped_height = np.random.uniform(values[0], values[1])
cropped_width = np.random.uniform(values[2], values[3])
else:
if values[1] > 1:
                raise ValueError('When only two elements are used, the values of the crop must be relative, between 0 and 1')
cropped_height = np.random.uniform(values[0], values[1])
cropped_width = cropped_height
cropped_height = cropped_height if cropped_height >= 1 else int(cropped_height * shape[0])
cropped_width = cropped_width if cropped_width >= 1 else int(cropped_width * shape[1])
center_w = \
np.random.randint(int(np.ceil(cropped_width / 2.0)), int(np.ceil(shape[1] - cropped_width / 2.0)), 1)[0]
center_h = \
np.random.randint(int(np.ceil(cropped_height / 2.0)), int(np.ceil(shape[0] - cropped_height / 2.0)), 1)[0]
width = int(np.ceil(cropped_width / 2.0))
height = int(np.ceil(cropped_height / 2.0))
for i, image in enumerate(images):
#if no_mask_positions[i]:
image = image.crop((center_h - height, center_w - width, center_h + height, center_w + width))
image = image.resize((shape[0], shape[1]))
# image = Image.fromarray(self.rescale(image))
output.append(image)
return output
def flip(self, images, values, **kwargs):
"""
Flip the image, vertically, horizontally or both
:param images: A list of numpy arrays, each being an image
:param values: 1 value, the type ('horizontal', 'vertical', 'all', 'random')
:param kwargs: None
:return: A list with the flipped images
"""
if isinstance(values, (tuple, list)):
values = values[0]
if values.upper() not in self.flip_types:
raise ValueError('The name {} does not exist for the flip operation. Possible values are: {}'.format(values,
self.flip_types))
if values.lower() == 'random':
values = np.random.choice(['horizontal', 'vertical', 'all'], 1)[0]
if values.lower() == 'horizontal' or values.lower() == 'hor' or values.lower() == 'both' or values.lower() == 'all':
images = [image.transpose(Image.FLIP_LEFT_RIGHT) for image in images]
if values.lower() == 'vertical' or values.lower() == 'ver' or values.lower() == 'both' or values.lower() == 'all':
images = [image.transpose(Image.FLIP_TOP_BOTTOM) for image in images]
return images
def greyscale(self, images, values, **kwargs):
"""
Convert to greyscale with probability one
:param images: A list of numpy arrays, each being an image
:param values: None
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
:return: A list with the images converted into greyscale
"""
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
output = []
for i, image in enumerate(images):
if no_mask_positions[i]:
output.append(ImageOps.grayscale(image))
else:
output.append(image)
return output
def grid_mask(self, images, values, **kwargs):
"""
Add a grid mask to the images following https://arxiv.org/pdf/2001.04086.pdf
:param images: A list of numpy arrays, each being an image
:param values: 8 values: Minimum and maximum value for the initial x position (top left corner of the top left square)
Minimum and maximum value for the initial y position (top left corner of the top left square)
Minimum and maximum value (range) for the width of the square
Minimum and maximum value (range) for the height of the square
Minimum and maximum value (range) for the x distance between square
Minimum and maximum value (range) for the y distance between square
All the values must be between 0 and 1 since they are relative to the image size.
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
use_colour: The colour to use. If the colour is not passed or it is a negative value or greater
than 255, gaussian noise will be used instead.
:return: List of images with occlusions by a grid of masks
"""
use_colour = kwargs.get('use_colour', -1)
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
if not hasattr(values, '__len__') or len(values) != 12:
raise ValueError(
'The number of values for the grid_mask operation must be a list or tuple with 12 values. The range of the initial point, square size and distance between square in x and y for the three of them')
if not self.check_images_equal_size(images):
print('For grid masks, the size of the images must be the same. Aborting')
return images
h, w = images[0].shape[:2]
params = []
name = 'grid_mask'
name_params = ['initial x position', 'initial y position', 'width square', 'height square',
'x distance between squares', 'y distance between squares']
for i in range(len(values) // 2):
param = checker(name, name_params[i], values[i * 2:(i + 1) * 2], 2, 0, 1)
if i % 2 == 0:
param = int(np.ceil(param * w))
else:
param = int(np.ceil(param * h))
params.append(param)
images_to_use = []
for ii in range(len(images)):
if no_mask_positions[ii]:
if use_colour < 0 or use_colour > 255:
im = self._std_noise * np.random.randn(*(images[ii].shape)) + 127.5
im[im < 0] = 0
im[im > 255] = 255
else:
im = use_colour * np.ones(tuple(images[ii].shape))
else:
im = np.zeros(tuple(images[ii].shape))
images_to_use.append(im)
return create_grid_masks(images, params[:2], params[2:4], params[4:], images_to_use,
no_mask_positions.tolist())
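    # Added sketch (not in the original): plausible 'grid_mask' values satisfying the
    # documented 0-1 relative ranges, ordered as min/max pairs of x position, y position,
    # square width, square height, x distance and y distance:
    #   'grid_mask': (0.05, 0.1, 0.05, 0.1, 0.1, 0.2, 0.1, 0.2, 0.2, 0.4, 0.2, 0.4)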
def illumination(self, images, values, **kwargs):
"""
Add illumination circles to the image following paper: https://arxiv.org/pdf/1910.08470.pdf
:param images: A list of numpy arrays, each being an image
:param values: 4 values: Minimum and maximum radius (float). The values must be between 0 and 1
Minimum and maximum intensity to add (int). This value cannot be larger than 255 and lower than 0.
:param kwargs: For this operation, the only extra parameter is the whether an image is a mask.
mask_positions: The positions in images that are masks.
:return: A list with the images with some blob of changes in the illumination
"""
name = 'illumination' # inspect.currentframe().f_code.co_name
if not self.check_images_equal_size(images):
print('For {}, the size of the images must be the same. Aborting'.format(name))
return images
param_values = [('radius', 1), ('intensity', 255)]
if not hasattr(values, '__len__') or len(values) != 5:
raise ValueError(
'The number of values for the illumination operation must be a list or tuple with 5 values'.format(
name))
if values[0].lower() not in self.illumination_types:
raise ValueError(
'The name {} does not exist for the flip operation. Possible values are: {}'.format(values[0],
self.illumination_types))
no_mask_positions = np.ones(len(images)).astype(bool)
for pos in kwargs.get('mask_positions', []): no_mask_positions[pos] = False
shape = images[0].shape
for i in range(len(values) // 2):
check_range(name, param_values[i][0], values[i * 2 + 1:(i + 1) * 2 + 1], 0, param_values[i][1])
aux = convert_to_absolute(values[1:3], shape)
values[1] = aux[0]
values[2] = aux[1]
type_illumination = values[0].lower()
if type_illumination == 'random':
type_illumination = np.random.choice(self.illumination_types[:-1], 1)
blob = | np.zeros(shape[:2]) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Sat May 19 17:13:47 2018
# Data preprocessing utility functions: read .npy data files from a given path and, after preprocessing, build our own training/validation/test data
@author: liuhuaqing
"""
import time
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from itertools import product
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
def convert_to_onehot(y,C):
    # Convert the labels to one-hot form
y = y.astype(int)
y_shape = list(y.shape)
y_shape[-1] = C
y_onehot = np.reshape(np.eye(C)[y.reshape(-1)], y_shape)
return y_onehot.astype(float)
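# Minimal usage sketch for convert_to_onehot (added; the shapes and number of classes
# are assumptions): the last axis holds the integer class id and is expanded into C
# one-hot channels.
def _demo_convert_to_onehot():
    labels = np.random.randint(0, 3, size=(2, 4, 4, 1))
    onehot = convert_to_onehot(labels, C=3)  # -> shape (2, 4, 4, 3)
    return onehot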
# Elastic deformation
def elastic_transform_V0(image3D,mask3D,alpha=1,sigma=1):
    # Elastic deformation function, version 0
shape = image3D.shape
random_state = np.random.RandomState(None)
dx = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
dy = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
dz = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
x,y,z,c = np.meshgrid(np.arange(shape[1]),np.arange(shape[0]),np.arange(shape[2]),np.arange(1))
x,y,z = x[:,:,:,0],y[:,:,:,0],z[:,:,:,0]
indices = np.reshape(y+dy,(-1,1)),np.reshape(x+dx,(-1,1)),np.reshape(z+dz,(-1,1))
image3D_elastic = map_coordinates(image3D,indices,order=1,mode='reflect').reshape(shape)
mask3D_elastic = map_coordinates(mask3D,indices,order=1,mode='reflect').reshape(shape)
return image3D_elastic, mask3D_elastic
def elastic_transform_V1(image3D,alpha=1,sigma=1):
    # Elastic deformation function, version 1; same functionality as version 0 (single volume, no mask)
shape = image3D.shape
random_state = np.random.RandomState(None)
dx = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
dy = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
dz = gaussian_filter((random_state.rand(*shape)*2-1),sigma)*alpha
x,y,z,c = np.meshgrid(np.arange(shape[1]),np.arange(shape[0]),np.arange(shape[2]),np.arange(1))
x,y,z = x[:,:,:,0],y[:,:,:,0],z[:,:,:,0]
indices = np.reshape(y+dy,(-1,1)),np.reshape(x+dx,(-1,1)),np.reshape(z+dz,(-1,1))
image3D_elastic = map_coordinates(image3D,indices,order=1,mode='reflect').reshape(shape)
return image3D_elastic
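# Minimal usage sketch for the elastic deformations (added; the volume size, alpha and
# sigma are assumptions): V0 warps an image together with its mask, V1 warps a single volume.
def _demo_elastic_transform():
    image = np.random.rand(32, 32, 16)
    mask = (np.random.rand(32, 32, 16) > 0.5).astype(float)
    image_el, mask_el = elastic_transform_V0(image, mask, alpha=2, sigma=3)
    image_el_v1 = elastic_transform_V1(image, alpha=2, sigma=3)
    return image_el, mask_el, image_el_v1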
# Rotation: axis is the rotation axis; 0, 1, 2 correspond to the x, y, z axes.
# theta is the rotation angle, given in degrees rather than radians.
# c is the rotation centre, a 1-D numpy array [x, y, z]; the default is the centre of the volume.
def rotation(data, axis, theta, c = np.array([]), patch_shape=(64,64,32)):  # c is the rotation centre
    # Rotation of a 3D array (in practice only rotation about the z axis is implemented; in this
    # project the z axis is the head-to-foot direction of the CT volume)
theta = -np.pi * theta / 180
if c.size == 0:
        c = np.array([np.floor((data.shape[0]-1)/2), np.floor((data.shape[1]-1)/2), np.floor((data.shape[2]-1)/2)])
s = patch_shape
    new_data = np.zeros(s)  # zero-filled output patch
    # rotation about the x axis
if axis == 0:
print('axis==0 not supported')
    # rotation about the y axis
elif axis == 1:
print('axis==1 not supported')
    # rotation about the z axis
else:
c_theta = np.cos(theta)
s_theta = np.sin(theta)
        i0, j0 = int(-s[0]/2), int(-s[1]/2)
i1,j1 = i0+s[0], j0+s[1]
        z0 = int(c[2]) - int(np.floor(patch_shape[2]/2))
        z1 = z0 + patch_shape[2]
for i,j in product( range(i0,i1),range(j0,j1) ):
x = np.floor(i*c_theta-j*s_theta+c[0]).astype(int)
y = np.floor(i*s_theta+j*c_theta+c[1]).astype(int)
new_data[i-i0,j-j0,:] = data[x,y,z0:z1]
return new_data
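# Minimal usage sketch for rotation (added; the cubic volume size and the 30 degree angle
# are assumptions): extracts a (64, 64, 32) patch rotated about the z axis around the
# centre of the volume.
def _demo_rotation():
    volume = np.random.rand(128, 128, 128)
    patch = rotation(volume, axis=2, theta=30, patch_shape=(64, 64, 32))
    return patch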
def calc_c_range(data,patch_shape=(64,64,32)):
    # Purpose: support the random rotation and cropping used for data augmentation by computing the range of the original CT volume from which patches may be extracted
c_range = np.zeros([2,3])
c_range[0:2,0:2] = np.vstack(
(np.ceil( np.array([0,0])+np.linalg.norm(np.array(patch_shape)[0:2])/2),
np.floor(np.array(data.shape)[0:2]-1-np.linalg.norm( | np.array(patch_shape) | numpy.array |
# maze example
import collections
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import sys
from collections import defaultdict
import random
import math
def plot_value_function(V, title="Value Function", scale_vmin=0):
"""
Plots the value function as a surface plot.
"""
min_x = min(k[0] for k in V.keys())
max_x = max(k[0] for k in V.keys())
min_y = min(k[1] for k in V.keys())
max_y = max(k[1] for k in V.keys())
x_range = np.arange(min_x, max_x + 1)
y_range = | np.arange(min_y, max_y + 1) | numpy.arange |
# ============================================================================
# ============================================================================
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Author: <NAME>
# E-mail:
# Description: Python implementations of preprocessing techniques.
# Contributors:
# ============================================================================
"""
Module of conversion methods in the preprocessing stage:
- Stitching images.
- Joining images if there is no overlapping.
- Converting a 360-degree sinogram with offset center-of-rotation (COR) to
a 180-degree sinogram.
- Extending a 360-degree sinogram with offset COR for direct reconstruction
instead of converting it to a 180-degree sinogram.
- Converting a 180-degree sinogram to a 360-sinogram.
- Generating a sinogram from a helical data.
"""
import numpy as np
from scipy import interpolate
from scipy.ndimage import shift
import algotom.prep.removal as remo
import algotom.prep.calculation as calc
def make_weight_matrix(mat1, mat2, overlap, side):
"""
Generate a linear-ramp weighting matrix for image stitching.
Parameters
----------
mat1 : array_like
2D array. Projection image or sinogram image.
mat2 : array_like
2D array. Projection image or sinogram image.
overlap : int
Width of the overlap area between two images.
side : {0, 1}
Only two options: 0 or 1. It is used to indicate the overlap side
respects to image 1. "0" corresponds to the left side. "1" corresponds
to the right side.
"""
overlap = int(np.floor(overlap))
wei_mat1 = np.ones_like(mat1)
wei_mat2 = np.ones_like(mat2)
if side == 1:
list_down = np.linspace(1.0, 0.0, overlap)
list_up = 1.0 - list_down
wei_mat1[:, -overlap:] = np.float32(list_down)
wei_mat2[:, :overlap] = np.float32(list_up)
else:
list_down = np.linspace(1.0, 0.0, overlap)
list_up = 1.0 - list_down
wei_mat2[:, -overlap:] = np.float32(list_down)
wei_mat1[:, :overlap] = np.float32(list_up)
return wei_mat1, wei_mat2
def stitch_image(mat1, mat2, overlap, side, wei_mat1=None, wei_mat2=None,
norm=True, total_width=None):
"""
Stitch projection images or sinogram images using a linear ramp.
Parameters
----------
mat1 : array_like
2D array. Projection image or sinogram image.
mat2 : array_like
2D array. Projection image or sinogram image.
overlap : float
Width of the overlap area between two images.
side : {0, 1}
Only two options: 0 or 1. It is used to indicate the overlap side
respects to image 1. "0" corresponds to the left side. "1" corresponds
to the right side.
wei_mat1 : array_like, optional
Weighting matrix used for image 1.
wei_mat2 : array_like, optional
Weighting matrix used for image 2.
norm : bool, optional
Enable/disable normalization before stitching.
total_width : int, optional
Final width of the stitched image.
Returns
-------
array_like
Stitched image.
"""
(nrow1, ncol1) = mat1.shape
(nrow2, ncol2) = mat2.shape
overlap_int = int(np.floor(overlap))
sub_pixel = overlap - overlap_int
if sub_pixel > 0.0:
if side == 1:
mat1 = shift(mat1, (0, sub_pixel), mode='nearest')
mat2 = shift(mat2, (0, -sub_pixel), mode='nearest')
else:
mat1 = shift(mat1, (0, -sub_pixel), mode='nearest')
mat2 = shift(mat2, (0, sub_pixel), mode='nearest')
if nrow1 != nrow2:
raise ValueError("Two images are not at the same height!!!")
if (wei_mat1 is None) or (wei_mat2 is None):
(wei_mat1, wei_mat2) = make_weight_matrix(mat1, mat2, overlap_int, side)
total_width0 = ncol1 + ncol2 - overlap_int
if (total_width is None) or (total_width < total_width0):
total_width = total_width0
mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32)
if side == 1:
if norm is True:
factor1 = np.mean(mat1[:, -overlap_int:])
factor2 = np.mean(mat2[:, :overlap_int])
mat2 = mat2 * factor1 / factor2
mat_comb[:, 0:ncol1] = mat1 * wei_mat1
mat_comb[:, (ncol1 - overlap_int):total_width0] += mat2 * wei_mat2
else:
if norm is True:
factor2 = np.mean(mat2[:, -overlap_int:])
factor1 = np.mean(mat1[:, :overlap_int])
mat2 = mat2 * factor1 / factor2
mat_comb[:, 0:ncol2] = mat2 * wei_mat2
mat_comb[:, (ncol2 - overlap_int):total_width0] += mat1 * wei_mat1
if total_width > total_width0:
mat_comb = np.pad(
mat_comb, ((0, 0), (0, total_width - total_width0)), mode='edge')
return mat_comb
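# Minimal usage sketch for stitch_image (added; the image sizes and the 20 pixel overlap
# are assumptions): two 64 x 100 frames stitched on the right side of the first one give
# a 64 x 180 result blended with the default linear-ramp weights.
def _demo_stitch_image():
    mat1 = np.ones((64, 100), dtype=np.float32)
    mat2 = np.ones((64, 100), dtype=np.float32)
    return stitch_image(mat1, mat2, overlap=20, side=1)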
def join_image(mat1, mat2, joint_width, side, norm=True, total_width=None):
"""
Join projection images or sinogram images. This is useful for fixing the
problem of non-overlap between images.
Parameters
----------
mat1 : array_like
2D array. Projection image or sinogram image.
mat2 : array_like
2D array. Projection image or sinogram image.
joint_width : float
Width of the joint area between two images.
side : {0, 1}
Only two options: 0 or 1. It is used to indicate the overlap side
respects to image 1. "0" corresponds to the left side. "1" corresponds
to the right side.
norm : bool
Enable/disable normalization before joining.
total_width : int, optional
Final width of the joined image.
Returns
-------
array_like
Stitched image.
"""
(nrow1, ncol1) = mat1.shape
(nrow2, ncol2) = mat2.shape
joint_int = int(np.floor(joint_width))
sub_pixel = joint_width - joint_int
side = int(side)
if sub_pixel > 0.0:
if side == 1:
mat1 = shift(mat1, (0, sub_pixel), mode='nearest')
mat2 = shift(mat2, (0, -sub_pixel), mode='nearest')
else:
mat1 = shift(mat1, (0, -sub_pixel), mode='nearest')
mat2 = shift(mat2, (0, sub_pixel), mode='nearest')
if nrow1 != nrow2:
raise ValueError("Two images are not at the same height!!!")
total_width0 = ncol1 + ncol2 + joint_int
if (total_width is None) or (total_width < total_width0):
total_width = total_width0
mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32)
if side == 1:
if norm is True:
factor1 = np.mean(mat1[:, -3:])
factor2 = np.mean(mat2[:, :3])
mat2 = mat2 * factor1 / factor2
mat_comb[:, 0:ncol1] = mat1
mat_comb[:, (ncol1 + joint_int):total_width0] += mat2
list_mask = np.zeros(total_width0, dtype=np.float32)
list_mask[ncol1 - 2:ncol1 + joint_int + 3] = 1.0
listx = np.where(list_mask < 1.0)[0]
listy = np.arange(nrow1)
mat = mat_comb[:, listx]
finter = interpolate.interp2d(listx, listy, mat, kind='linear')
listx_miss = np.where(list_mask > 0.0)[0]
if len(listx_miss) > 0:
mat_comb[:, listx_miss] = finter(listx_miss, listy)
else:
if norm is True:
factor2 = np.mean(mat2[:, -3:])
            factor1 = np.mean(mat1[:, :3])
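            # NOTE: the remainder of this function is truncated in this
            # excerpt. The lines below are a reconstruction sketch that simply
            # mirrors the side == 1 branch above; they are an assumption, not
            # the original source.
            mat2 = mat2 * factor1 / factor2
        mat_comb[:, 0:ncol2] = mat2
        mat_comb[:, (ncol2 + joint_int):total_width0] += mat1
        list_mask = np.zeros(total_width0, dtype=np.float32)
        list_mask[ncol2 - 2:ncol2 + joint_int + 3] = 1.0
        listx = np.where(list_mask < 1.0)[0]
        listy = np.arange(nrow1)
        mat = mat_comb[:, listx]
        finter = interpolate.interp2d(listx, listy, mat, kind='linear')
        listx_miss = np.where(list_mask > 0.0)[0]
        if len(listx_miss) > 0:
            mat_comb[:, listx_miss] = finter(listx_miss, listy)
    if total_width > total_width0:
        mat_comb = np.pad(
            mat_comb, ((0, 0), (0, total_width - total_width0)), mode='edge')
    return mat_comb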
import gym
from gym import spaces
from gym.utils import seeding
from rllab.envs.base import Env
from rllab import spaces
import numpy as np
class TwoRoundNondeterministicRewardEnv(Env):
def __init__(self):
self.reset()
def step(self, action):
rewards = [
[
[-1, 1], # expected value 0
[0, 0, 9] # expected value 3. This is the best path.
],
[
[0, 2], # expected value 1
[2, 3] # expected value 2.5
]
]
assert self.action_space.contains(action)
if self.firstAction is None:
self.firstAction = action
reward = 0
done = False
else:
            reward = np.random.choice(rewards[self.firstAction][action])
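            # Reconstruction sketch: the rest of this class is truncated in
            # this excerpt. The upstream gym debugging environment ends the
            # episode after the second action and returns (observation,
            # reward, done, info); a placeholder observation is used here.
            done = True
        return 0, reward, done, {}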
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
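# Illustrative usage sketch (not part of the auto-generated data below):
# once the definitions that follow have populated the dictionary, a space
# group can be looked up by number or by Hermann-Mauguin symbol, e.g.
#
#   sg = space_groups['P 21 21 21']        # same object as space_groups[19]
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#
# hkls holds one set of Miller indices per symmetry operation and phases the
# corresponding structure-factor phase factors.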
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
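# Illustrative sketch (not part of the generated table): by the usual
# crystallographic convention an operation maps a fractional coordinate x to
# N.dot(rot, x) + trans_num/trans_den, e.g.
#
#     rot, num, den = transformations[1]
#     x = N.array([0.1, 0.2, 0.3])
#     x_image = N.dot(rot, x) + 1.*num/den
#
# The float division (1.*num/den) keeps the sketch portable between Numeric-
# and numpy-style N modules.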
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
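# Note: the translation parts are left exactly as generated and are not
# normalised -- components such as 5/4 or negative fractions appear as produced
# by the generator and are equivalent modulo whole lattice translations.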
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
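# Usage note (illustrative): because each group is registered twice,
# space_groups[107] and space_groups['I 4 m m'] refer to the same SpaceGroup
# instance, so either key can be used for lookup.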
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
import numpy as np
from . import ransac
def calc_reprojection_error( pmat, X3d, x2d ):
X3dht = np.hstack( (X3d, np.ones( (len(X3d),1))) ).T
x2dt = x2d.T
x2dh = np.dot(pmat, X3dht)
found = x2dh[:2]/x2dh[2]
orig = x2dt
reproj_error = np.sqrt(np.sum((found-orig)**2, axis=0)) # L2 norm
return {'mean':np.mean(reproj_error),
'all':reproj_error}
class DltRansacModel:
"""linear system solved using linear least squares
This class serves as an example that fulfills the model interface
needed by the ransac() function.
"""
def __init__(self,X3d,x2d,debug=False):
self.X3dht = np.hstack((X3d, np.ones( (len(X3d),1)))).T
self.x2dt = x2d.T
self.fullB,self.fullc = build_Bc(X3d,x2d)
self.debug = debug
def fit(self, data):
# calculate the DLT given a set of data
# create index into B and c
idx = np.repeat(data,2)*2
idx[1::2] += 1
# now slice our B and c arrays for this partition
B = self.fullB[idx]
c = self.fullc[idx]
DLT_avec_results = np.linalg.lstsq(B,c)
a_vec,residuals = DLT_avec_results[:2]
Mhat = np.array(list(a_vec)+[1])
Mhat.shape=(3,4)
return Mhat
def get_error( self, data, model):
X3dht = self.X3dht[:,data]
x2dh = np.dot(model, X3dht)
x2d = x2dh[:2]/x2dh[2]
found = x2d
orig = self.x2dt[:,data]
        reproj_error = np.sum((found-orig)**2, axis=0)  # squared reprojection error
return reproj_error
def build_M(X3d,x2d):
M = []
assert len(X3d)==len(x2d)
assert len(X3d) >= 6 # 2 equations and 11 unknowns means we need 6 points.
for i in range(len(X3d)):
X = X3d[i,0]
Y = X3d[i,1]
Z = X3d[i,2]
x = x2d[i,0]
y = x2d[i,1]
M.append( [X, Y, Z, 1, 0, 0, 0, 0, 0, 0, 0, 0, -x] )
M.append( [0, 0, 0, 0, X, Y, Z, 1, 0, 0, 0, 0, -y] )
M.append( [0, 0, 0, 0, 0, 0, 0, 0, X, Y, Z, 1, -1] )
return np.array(M)
def build_Bc(X3d,x2d):
"""Build matrix B and vector c to solve
We want an equation of the form Bx = c where x is a vector of
unknowns corresponding to the solution to the camera calibration.
"""
B = []
c = []
assert len(X3d)==len(x2d)
if len(X3d) < 6:
print('WARNING: 2 equations and 11 unknowns means we need 6 points!')
for i in range(len(X3d)):
X = X3d[i,0]
Y = X3d[i,1]
Z = X3d[i,2]
x = x2d[i,0]
y = x2d[i,1]
B.append( [X, Y, Z, 1, 0, 0, 0, 0, -x*X, -x*Y, -x*Z] )
B.append( [0, 0, 0, 0, X, Y, Z, 1, -y*X, -y*Y, -y*Z] )
c.append( x )
c.append( y )
return np.array(B), np.array(c)
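# Hedged illustration (added): once B and c are assembled, the 11 DLT parameters can be
# recovered with an ordinary least-squares solve, which is what simple_dlt() below does:
#
#   B, c = build_Bc(X3d, x2d)
#   a_vec = np.linalg.lstsq(B, c, rcond=None)[0]  # 11 unknowns of the 3x4 matrix
#   pmat = np.append(a_vec, 1.0).reshape(3, 4)    # bottom-right entry fixed to 1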
# def center(P):
# orig_determinant = np.linalg.det
# def determinant( A ):
# return orig_determinant( np.asarray( A ) )
# # camera center
# X = determinant( [ P[:,1], P[:,2], P[:,3] ] )
# Y = -determinant( [ P[:,0], P[:,2], P[:,3] ] )
# Z = determinant( [ P[:,0], P[:,1], P[:,3] ] )
# T = -determinant( [ P[:,0], P[:,1], P[:,2] ] )
# C_ = np.array( [[ X/T, Y/T, Z/T ]] ).T
# return C_
def ransac_dlt(X3d, x2d,
n = 6, # six is minimum
k = 200, # do it 200 times
t = 15.0, # mean reprojection error should be less than 15
d = 8,
debug=False,
):
"""perform the DLT in RANSAC
Params
------
n: the minimum number of data values required to fit the model
k: the maximum number of iterations allowed in the algorithm
t: a threshold value for determining when a data point fits a model
d: the number of close data values required to assert that a model fits well to data
"""
model = DltRansacModel(X3d,x2d)
data = np.arange( len(X3d) )
return ransac.ransac(data,model,n,k,t,d,debug=debug,return_all=True)
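# Hedged usage note (added): a candidate camera matrix is kept only if at least `d`
# further correspondences agree with it within the error threshold `t`, e.g.
#
#   pmat, info = ransac_dlt(X3d, x2d, t=10.0, d=10)
#   inlier_idx = info['inliers']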
def simple_dlt2(X3d, x2d):
normalize = True
if normalize:
# normalize so that SVD has better numerical properties
mx = np.mean(x2d[:,0])
my = np.mean(x2d[:,1])
s = 1.0/((mx+my)*0.5)
N = np.array( [[ s, 0, -s*mx],
[ 0, s, -s*my],
[ 0, 0, 1]])
x2dhT = np.hstack( (x2d, np.ones_like( x2d[:,np.newaxis,0] )) ).T
xt = np.dot(N,x2dhT)
x2dN = xt[:2].T
use_2d = x2dN
else:
use_2d = x2d
M = build_M(X3d,use_2d)
U,s,V = np.linalg.svd(M, full_matrices=False)
a = V[-1]
assert a.shape == (13,)
Mhat = a[:12]
Mhat.shape=(3,4)
    if normalize:
        Ninv = np.linalg.pinv(N)
        Mhat = np.dot( Ninv, Mhat )
assert Mhat.shape==(3,4)
results = dict(#center = center(Mhat).T[0],
pmat = Mhat,
)
return results
def simple_dlt(X3d, x2d):
B,c = build_Bc(X3d,x2d)
DLT_avec_results = np.linalg.lstsq(B,c)
a_vec,residuals = DLT_avec_results[:2]
Mhat = np.array(list(a_vec)+[1])
Mhat.shape=(3,4)
results = dict(#center = center(Mhat).T[0],
pmat = Mhat,
)
return results
def dlt(X3d, x2d, ransac=True):
X3d = np.array(X3d)
x2d = np.array(x2d)
if ransac:
pmat,rd = ransac_dlt(X3d, x2d)
result = dict(#center = center(pmat).T[0],
pmat = pmat,
)
idxs = rd['inliers']
result['X3d'] = X3d[idxs]
result['x2d'] = x2d[idxs]
else:
result = simple_dlt(X3d, x2d)
result['X3d'] = X3d
result['x2d'] = x2d
err = calc_reprojection_error( result['pmat'], result['X3d'], result['x2d'] )
result['mean_reprojection_error'] = err['mean']
result['reprojection_error'] = err['all']
return result
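# Hedged usage sketch (added for illustration; the synthetic camera below is a made-up
# placeholder, not data from the original project). It projects random 3D points with a
# known matrix and recovers it with the non-RANSAC DLT above.
def _demo_simple_dlt(n_points=12, seed=0):
    rng = np.random.RandomState(seed)
    pmat_true = np.array([[500.0, 0.0, 320.0, 10.0],
                          [0.0, 500.0, 240.0, 20.0],
                          [0.0, 0.0, 1.0, 30.0]])
    X3d = rng.uniform(-1.0, 1.0, size=(n_points, 3))
    x2dh = np.dot(pmat_true, np.hstack((X3d, np.ones((n_points, 1)))).T)
    x2d = (x2dh[:2] / x2dh[2]).T
    result = dlt(X3d, x2d, ransac=False)
    print('mean reprojection error:', result['mean_reprojection_error'])
    return result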
def print_summary(results,n_pts=None):
opts = np.get_printoptions()
    np.set_printoptions(precision=6, linewidth=150, suppress=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module that contains an algorithm for 3D scene reconstruction """
import cv2
import numpy as np
import sys
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class SceneReconstruction3D:
"""3D scene reconstruction
This class implements an algorithm for 3D scene reconstruction using
stereo vision and structure-from-motion techniques.
A 3D scene is reconstructed from a pair of images that show the same
real-world scene from two different viewpoints. Feature matching is
performed either with rich feature descriptors or based on optic flow.
3D coordinates are obtained via triangulation.
Note that a complete structure-from-motion pipeline typically includes
bundle adjustment and geometry fitting, which are out of scope for
this project.
"""
def __init__(self, K, dist):
"""Constructor
This method initializes the scene reconstruction algorithm.
:param K: 3x3 intrinsic camera matrix
:param dist: vector of distortion coefficients
"""
self.K = K
self.K_inv = np.linalg.inv(K) # store inverse for fast access
self.d = dist
def load_image_pair(self, img_path1, img_path2, downscale=True):
"""Loads pair of images
This method loads the two images for which the 3D scene should be
reconstructed. The two images should show the same real-world scene
from two different viewpoints.
:param img_path1: path to first image
:param img_path2: path to second image
:param downscale: flag whether to downscale the images to
roughly 600px width (True) or not (False)
"""
self.img1 = cv2.imread(img_path1, cv2.CV_8UC3)
self.img2 = cv2.imread(img_path2, cv2.CV_8UC3)
# make sure images are valid
if self.img1 is None:
sys.exit("Image " + img_path1 + " could not be loaded.")
if self.img2 is None:
sys.exit("Image " + img_path2 + " could not be loaded.")
if len(self.img1.shape) == 2:
self.img1 = cv2.cvtColor(self.img1, cv2.COLOR_GRAY2BGR)
self.img2 = cv2.cvtColor(self.img2, cv2.COLOR_GRAY2BGR)
# scale down image if necessary
# to something close to 600px wide
target_width = 600
if downscale and self.img1.shape[1] > target_width:
while self.img1.shape[1] > 2*target_width:
self.img1 = cv2.pyrDown(self.img1)
self.img2 = cv2.pyrDown(self.img2)
# undistort the images
self.img1 = cv2.undistort(self.img1, self.K, self.d)
self.img2 = cv2.undistort(self.img2, self.K, self.d)
def plot_optic_flow(self):
"""Plots optic flow field
This method plots the optic flow between the first and second
image.
"""
self._extract_keypoints("flow")
img = self.img1
        for i in range(len(self.match_pts1)):
cv2.line(img, tuple(self.match_pts1[i]), tuple(self.match_pts2[i]),
color=(255, 0, 0))
theta = np.arctan2(self.match_pts2[i][1] - self.match_pts1[i][1],
self.match_pts2[i][0] - self.match_pts1[i][0])
cv2.line(img, tuple(self.match_pts2[i]),
(np.int(self.match_pts2[i][0] - 6*np.cos(theta+np.pi/4)),
np.int(self.match_pts2[i][1] - 6*np.sin(theta+np.pi/4))),
color=(255, 0, 0))
cv2.line(img, tuple(self.match_pts2[i]),
(np.int(self.match_pts2[i][0] - 6*np.cos(theta-np.pi/4)),
np.int(self.match_pts2[i][1] - 6*np.sin(theta-np.pi/4))),
color=(255, 0, 0))
cv2.imshow("imgFlow", img)
cv2.waitKey()
def draw_epipolar_lines(self, feat_mode="SURF"):
"""Draws epipolar lines
This method computes and draws the epipolar lines of the two
loaded images.
:param feat_mode: whether to use rich descriptors for feature
matching ("surf") or optic flow ("flow")
"""
self._extract_keypoints(feat_mode)
self._find_fundamental_matrix()
# Find epilines corresponding to points in right image (second image)
# and drawing its lines on left image
pts2re = self.match_pts2.reshape(-1, 1, 2)
lines1 = cv2.computeCorrespondEpilines(pts2re, 2, self.F)
lines1 = lines1.reshape(-1, 3)
img3, img4 = self._draw_epipolar_lines_helper(self.img1, self.img2,
lines1, self.match_pts1,
self.match_pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
pts1re = self.match_pts1.reshape(-1, 1, 2)
lines2 = cv2.computeCorrespondEpilines(pts1re, 1, self.F)
lines2 = lines2.reshape(-1, 3)
img1, img2 = self._draw_epipolar_lines_helper(self.img2, self.img1,
lines2, self.match_pts2,
self.match_pts1)
cv2.imshow("left", img1)
cv2.imshow("right", img3)
cv2.waitKey()
def plot_rectified_images(self, feat_mode="SURF"):
"""Plots rectified images
This method computes and plots a rectified version of the two
images side by side.
:param feat_mode: whether to use rich descriptors for feature
matching ("surf") or optic flow ("flow")
"""
self._extract_keypoints(feat_mode)
self._find_fundamental_matrix()
self._find_essential_matrix()
self._find_camera_matrices_rt()
R = self.Rt2[:, :3]
T = self.Rt2[:, 3]
#perform the rectification
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(self.K, self.d,
self.K, self.d,
self.img1.shape[:2],
R, T, alpha=1.0)
mapx1, mapy1 = cv2.initUndistortRectifyMap(self.K, self.d, R1, self.K,
self.img1.shape[:2],
cv2.CV_32F)
mapx2, mapy2 = cv2.initUndistortRectifyMap(self.K, self.d, R2, self.K,
self.img2.shape[:2],
cv2.CV_32F)
img_rect1 = cv2.remap(self.img1, mapx1, mapy1, cv2.INTER_LINEAR)
img_rect2 = cv2.remap(self.img2, mapx2, mapy2, cv2.INTER_LINEAR)
# draw the images side by side
total_size = (max(img_rect1.shape[0], img_rect2.shape[0]),
img_rect1.shape[1] + img_rect2.shape[1], 3)
img = np.zeros(total_size, dtype=np.uint8)
img[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
img[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2
        # draw horizontal lines every 25 px across the side-by-side image
for i in range(20, img.shape[0], 25):
cv2.line(img, (0, i), (img.shape[1], i), (255, 0, 0))
cv2.imshow('imgRectified', img)
cv2.waitKey()
def plot_point_cloud(self, feat_mode="SURF"):
"""Plots 3D point cloud
This method generates and plots a 3D point cloud of the recovered
3D scene.
:param feat_mode: whether to use rich descriptors for feature
matching ("surf") or optic flow ("flow")
"""
self._extract_keypoints(feat_mode)
self._find_fundamental_matrix()
self._find_essential_matrix()
self._find_camera_matrices_rt()
# triangulate points
first_inliers = np.array(self.match_inliers1).reshape(-1, 3)[:, :2]
second_inliers = np.array(self.match_inliers2).reshape(-1, 3)[:, :2]
pts4D = cv2.triangulatePoints(self.Rt1, self.Rt2, first_inliers.T,
second_inliers.T).T
# convert from homogeneous coordinates to 3D
pts3D = pts4D[:, :3]/np.repeat(pts4D[:, 3], 3).reshape(-1, 3)
# plot with matplotlib
Ys = pts3D[:, 0]
Zs = pts3D[:, 1]
Xs = pts3D[:, 2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(Xs, Ys, Zs, c='r', marker='o')
ax.set_xlabel('Y')
ax.set_ylabel('Z')
ax.set_zlabel('X')
plt.title('3D point cloud: Use pan axes button below to inspect')
plt.show()
def _extract_keypoints(self, feat_mode):
"""Extracts keypoints
This method extracts keypoints for feature matching based on
a specified mode:
- "surf": use rich SURF descriptor
- "flow": use optic flow
:param feat_mode: keypoint extraction mode ("surf" or "flow")
"""
# extract features
if feat_mode.lower() == "surf":
# feature matching via SURF and BFMatcher
self._extract_keypoints_surf()
else:
if feat_mode.lower() == "flow":
# feature matching via optic flow
self._extract_keypoints_flow()
else:
sys.exit("Unknown feat_mode " + feat_mode +
". Use 'SURF' or 'FLOW'")
def _extract_keypoints_surf(self):
"""Extracts keypoints via SURF descriptors"""
# extract keypoints and descriptors from both images
detector = cv2.SURF(250)
first_key_points, first_desc = detector.detectAndCompute(self.img1,
None)
second_key_points, second_desc = detector.detectAndCompute(self.img2,
None)
# match descriptors
matcher = cv2.BFMatcher(cv2.NORM_L1, True)
matches = matcher.match(first_desc, second_desc)
# generate lists of point correspondences
first_match_points = np.zeros((len(matches), 2), dtype=np.float32)
second_match_points = np.zeros_like(first_match_points)
for i in range(len(matches)):
first_match_points[i] = first_key_points[matches[i].queryIdx].pt
second_match_points[i] = second_key_points[matches[i].trainIdx].pt
self.match_pts1 = first_match_points
self.match_pts2 = second_match_points
def _extract_keypoints_flow(self):
"""Extracts keypoints via optic flow"""
# find FAST features
fast = cv2.FastFeatureDetector()
first_key_points = fast.detect(self.img1, None)
first_key_list = [i.pt for i in first_key_points]
first_key_arr = np.array(first_key_list).astype(np.float32)
second_key_arr, status, err = cv2.calcOpticalFlowPyrLK(self.img1,
self.img2,
first_key_arr)
# filter out the points with high error
# keep only entries with status=1 and small error
condition = (status == 1) * (err < 5.)
concat = np.concatenate((condition, condition), axis=1)
first_match_points = first_key_arr[concat].reshape(-1, 2)
second_match_points = second_key_arr[concat].reshape(-1, 2)
self.match_pts1 = first_match_points
self.match_pts2 = second_match_points
def _find_fundamental_matrix(self):
"""Estimates fundamental matrix """
self.F, self.Fmask = cv2.findFundamentalMat(self.match_pts1,
self.match_pts2,
cv2.FM_RANSAC, 0.1, 0.99)
def _find_essential_matrix(self):
"""Estimates essential matrix based on fundamental matrix """
self.E = self.K.T.dot(self.F).dot(self.K)
def _find_camera_matrices_rt(self):
"""Finds the [R|t] camera matrix"""
# decompose essential matrix into R, t (See Hartley and Zisserman 9.13)
U, S, Vt = np.linalg.svd(self.E)
W = np.array([0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
1.0]).reshape(3, 3)
# iterate over all point correspondences used in the estimation of the
# fundamental matrix
first_inliers = []
second_inliers = []
for i in range(len(self.Fmask)):
if self.Fmask[i]:
# normalize and homogenize the image coordinates
first_inliers.append(self.K_inv.dot([self.match_pts1[i][0],
self.match_pts1[i][1], 1.0]))
second_inliers.append(self.K_inv.dot([self.match_pts2[i][0],
self.match_pts2[i][1], 1.0]))
# Determine the correct choice of second camera matrix
# only in one of the four configurations will all the points be in
# front of both cameras
        # First choice: R = U * W * Vt, T = +u_3 (see Hartley & Zisserman, 9.19)
R = U.dot(W).dot(Vt)
T = U[:, 2]
if not self._in_front_of_both_cameras(first_inliers, second_inliers,
R, T):
# Second choice: R = U * W * Vt, T = -u_3
T = - U[:, 2]
if not self._in_front_of_both_cameras(first_inliers, second_inliers,
R, T):
# Third choice: R = U * Wt * Vt, T = u_3
R = U.dot(W.T).dot(Vt)
T = U[:, 2]
if not self._in_front_of_both_cameras(first_inliers,
second_inliers, R, T):
# Fourth choice: R = U * Wt * Vt, T = -u_3
T = - U[:, 2]
self.match_inliers1 = first_inliers
self.match_inliers2 = second_inliers
self.Rt1 = np.hstack((np.eye(3), np.zeros((3, 1))))
self.Rt2 = np.hstack((R, T.reshape(3, 1)))
def _draw_epipolar_lines_helper(self, img1, img2, lines, pts1, pts2):
"""Helper method to draw epipolar lines and features """
if img1.shape[2] == 1:
img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
if img2.shape[2] == 1:
img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
c = img1.shape[1]
for r, pt1, pt2 in zip(lines, pts1, pts2):
color = tuple(np.random.randint(0, 255, 3).tolist())
x0, y0 = map(int, [0, -r[2]/r[1]])
x1, y1 = map(int, [c, -(r[2] + r[0]*c) / r[1]])
cv2.line(img1, (x0, y0), (x1, y1), color, 1)
cv2.circle(img1, tuple(pt1), 5, color, -1)
cv2.circle(img2, tuple(pt2), 5, color, -1)
return img1, img2
def _in_front_of_both_cameras(self, first_points, second_points, rot,
trans):
"""Determines whether point correspondences are in front of both
images"""
rot_inv = rot
for first, second in zip(first_points, second_points):
first_z = np.dot(rot[0, :] - second[0]*rot[2, :],
trans) / np.dot(rot[0, :] - second[0]*rot[2, :],
second)
first_3d_point = np.array([first[0] * first_z,
second[0] * first_z, first_z])
            second_3d_point = np.dot(rot.T, first_3d_point) - np.dot(rot.T, trans)
            if first_3d_point[2] < 0 or second_3d_point[2] < 0:
                return False
        return True
# Name: <NAME>
# Date: 2 March 2020
# Program: biot_square.py
import numpy as np
import matplotlib.pyplot as plt
import time as time
from matplotlib.patches import Circle
def biot(Rvec, wire, I):
mu_4pi = 10
dB = np.zeros((len(wire), 3))
R = Rvec - wire
Rsqr = np.sum( R**2, axis = 1 )
dL = (np.roll(wire, -1, axis = 0) - np.roll(wire, +1, axis = 0))/2
cr = np.cross(dL, R, axis = 1 )
dB = mu_4pi * I * cr/Rsqr[:,None]**(3/2)
dB = np.concatenate((dB, [dB[0,:]]), axis = 0)
Btot = np.array([simpson(dB[:,0], 1), simpson(dB[:,1], 1), simpson(dB[:,2], 1)])
return Btot
def simpson(f, dr):
    total = dr/3*(np.sum(f[1:] + f[:-1]) + 2*np.sum(f[1::2]))
    return total
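# Hedged usage sketch (added; the square-loop geometry below is an assumption based on
# the file name, not recovered from the original script). It evaluates the field at the
# centre of a square current loop lying in the z = 0 plane.
def demo_square_loop(n_per_side=200, half_width=1.0, current=1.0):
    # trace the loop as an ordered sequence of points around the perimeter
    s = np.linspace(-half_width, half_width, n_per_side)
    top = np.column_stack((s, np.full_like(s, half_width), np.zeros_like(s)))
    right = np.column_stack((np.full_like(s, half_width), -s, np.zeros_like(s)))
    bottom = np.column_stack((-s, np.full_like(s, -half_width), np.zeros_like(s)))
    left = np.column_stack((np.full_like(s, -half_width), s, np.zeros_like(s)))
    wire = np.vstack((top, right, bottom, left))
    # by symmetry the z-component should dominate at the centre
    return biot(np.array([0.0, 0.0, 0.0]), wire, current)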
import sys
import csv
import json
import click
from Bio import SeqIO
import numpy as np
from flea.util import js_divergence
from flea.util import column_count
from flea.util import prob
from flea.util import get_date_dict
from flea.util import id_to_copynumber, id_to_label
from flea.util import read_single_record
@click.command()
@click.argument('infile')
@click.argument('mrcafile')
@click.argument('metafile')
@click.argument('outfile')
def main(infile, mrcafile, metafile, outfile):
date_dict = get_date_dict(metafile)
records = list(SeqIO.parse(infile, 'fasta'))
seqs = list(list(str(r.seq)) for r in records)
# assume id has "<timepoint>_<misc>_n_<copynumber>" pattern
copynumbers = list(id_to_copynumber(r.id) for r in records)
seq_dates = list(date_dict[id_to_label(r.id)] for r in records)
# add MRCA
mrca_record = read_single_record(mrcafile, 'fasta', expected=1)
seqs.insert(0, list(str(mrca_record.seq)))
copynumbers.insert(0, 1)
seq_dates.insert(0, 0)
seq_array = np.array(seqs)
copynumber_array = np.array(copynumbers)
date_array = np.array(seq_dates)
# FIXME: labels must sort according to date!!!
dates = list(sorted(set(date_dict.values())))
dates.insert(0, 0)
aas = list(sorted(set(seq_array.ravel())))
    alphabet = np.array(aas)
import math
import multiprocessing as mp
import random
import string
import time
import gc
import numpy as np
import pandas as pd
import tensorflow as tf
from openea.models.basic_model import BasicModel
from openea.modules.base.initializers import init_embeddings
from openea.modules.base.losses import margin_loss, mapping_loss
from openea.modules.base.optimizers import generate_optimizer, get_optimizer
from openea.modules.finding.evaluation import valid, test, early_stop
from openea.modules.utils.util import load_session
from openea.modules.utils.util import task_divide
from openea.modules.bootstrapping.alignment_finder import find_potential_alignment_greedily, check_new_alignment, search_nearest_k
from openea.modules.finding.similarity import sim
def find_alignment(sub_embeds, embeds, indexes, desc_sim_th):
desc_sim = sim(sub_embeds, embeds, normalize=True)
nearest_k_neighbors = search_nearest_k(desc_sim, 1)
alignment = list()
for i, j in nearest_k_neighbors:
if desc_sim[i, j] >= desc_sim_th:
alignment.append((indexes[i], j))
if len(alignment) == 0:
print("find no new alignment")
return []
# new_alignment_desc_index = find_potential_alignment_greedily(desc_sim, desc_sim_th)
# if new_alignment_desc_index is None or len(new_alignment_desc_index) == 0:
# print("find no new alignment")
# return []
# alignment = [(indexes[i], j) for (i, j) in new_alignment_desc_index]
return alignment
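# Hedged usage sketch (added for illustration; shapes and threshold are arbitrary
# placeholders). find_alignment() takes two embedding matrices plus a list mapping rows
# of `sub_embeds` back to global entity ids, and returns (entity_id, column) pairs.
def _demo_find_alignment():
    sub_embeds = np.random.rand(5, 32)
    embeds = np.random.rand(50, 32)
    return find_alignment(sub_embeds, embeds, indexes=list(range(5)), desc_sim_th=0.2)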
class KDCoE(BasicModel):
def __init__(self):
super().__init__()
self.desc_batch_size = None
self.negative_indication_weight = None
self.wv_dim = None
self.default_desc_length = None
self.word_embed = None
self.desc_sim_th = None
self.sim_th = None
self.word_em = None
self.e_desc = None
self.ref_entities1 = None
self.ref_entities2 = None
self.new_alignment = set()
self.new_alignment_index = set()
def init(self):
assert self.args.alpha > 1
self.desc_batch_size = self.args.desc_batch_size
self.negative_indication_weight = -1. / self.desc_batch_size
self.wv_dim = self.args.wv_dim
self.default_desc_length = self.args.default_desc_length
self.word_embed = self.args.word_embed
self.desc_sim_th = self.args.desc_sim_th
self.sim_th = self.args.sim_th
self.word_em, self.e_desc = self._get_desc_input()
self.ref_entities1 = self.kgs.valid_entities1 + self.kgs.test_entities1
self.ref_entities2 = self.kgs.valid_entities2 + self.kgs.test_entities2
self._define_variables()
self._define_mapping_variables()
self._define_embed_graph()
self._define_mapping_graph()
self._define_mapping_graph_new()
self._define_desc_graph()
self.session = load_session()
tf.global_variables_initializer().run(session=self.session)
def _get_desc_input(self):
list1 = self.kgs.train_entities1 + self.kgs.valid_entities1 + self.kgs.test_entities1
list2 = self.kgs.train_entities2 + self.kgs.valid_entities2 + self.kgs.test_entities2
aligned_dict = dict(zip(list1, list2))
print("aligned dict", len(aligned_dict))
# desc graph settings
start = time.time()
# find desc
model = self
at1 = pd.DataFrame(model.kgs.kg1.attribute_triples_list)
at2 = pd.DataFrame(model.kgs.kg2.attribute_triples_list)
"""
0 1 2
0 22816 168 "4000.1952"^^<http://www.w3.org/2001/XMLSchema...
1 14200 6 "1.82"^^<http://www.w3.org/2001/XMLSchema#double>
2 20874 38 99657
"""
aid1 = pd.Series(list(model.kgs.kg1.attributes_id_dict), index=model.kgs.kg1.attributes_id_dict.values())
aid2 = pd.Series(list(model.kgs.kg2.attributes_id_dict), index=model.kgs.kg2.attributes_id_dict.values())
"""
0 http://xmlns.com/foaf/0.1/name
2 http://dbpedia.org/ontology/birthDate
"""
"""
1 http://dbpedia.org/ontology/years
3 http://dbpedia.org/ontology/appearancesInLeague
"""
uri_name = 'escription' # in Wikidata, the attribute is http://schema.org/description
desc_uris1 = aid1[aid1.str.findall(uri_name).apply(lambda x: len(x)) > 0]
desc_uris2 = aid2[aid2.str.findall(uri_name).apply(lambda x: len(x)) > 0]
"""
8 http://purl.org/dc/elements/1.1/description
462 http://dbpedia.org/ontology/description
464 http://dbpedia.org/ontology/depictionDescription
"""
"""
31 http://dbpedia.org/ontology/depictionDescription
123 http://purl.org/dc/terms/description
183 http://purl.org/dc/elements/1.1/description
"""
desc_ids1 = desc_uris1.index.values.tolist()
desc_ids2 = desc_uris2.index.values.tolist()
"""
[31 123 183]
"""
e_desc1 = at1[at1.iloc[:, 1].isin(desc_ids1)]
e_desc2 = at2[at2.iloc[:, 1].isin(desc_ids2)]
print("kg1 descriptions:", len(e_desc1))
print("kg2 descriptions:", len(e_desc2))
"""
156083 7169 31 <NAME> (2016, rechts)
156127 1285 31 Olk (links) mit<NAME>und<NAME>
"""
e_desc1 = e_desc1.drop_duplicates(subset=0)
e_desc2 = e_desc2.drop_duplicates(subset=0)
print("after drop_duplicates, kg1 descriptions:", len(e_desc1))
print("after drop_duplicates, kg2 descriptions:", len(e_desc2))
ents_w_desc1_list = e_desc1.iloc[:, 0].values.tolist()
ents_w_desc1 = set(ents_w_desc1_list)
ents_w_desc1_index = e_desc1.index.values.tolist()
print("kg1 entities having descriptions:", len(ents_w_desc1))
ents_w_desc2_list = e_desc2.iloc[:, 0].values.tolist()
ents_w_desc2 = set(ents_w_desc2_list)
print("kg2 entities having descriptions:", len(ents_w_desc2))
# drop_desc_index1 = []
# selected_ent2_ids = []
# for i in range(len(ents_w_desc1_list)):
# aligned_ent2 = aligned_dict.get(ents_w_desc1_list[i], None)
# if aligned_ent2 not in ents_w_desc2:
# drop_desc_index1.append(ents_w_desc1_index[i])
# else:
# selected_ent2_ids.append(aligned_ent2)
# e_desc1 = e_desc1.drop(drop_desc_index1)
# e_desc2 = e_desc2[e_desc2.iloc[:, 0].isin(selected_ent2_ids)]
# print("after alignment, kg1 descriptions:", len(e_desc1))
# print("after alignment, kg2 descriptions:", len(e_desc2))
# ents_w_desc1_list = e_desc1.iloc[:, 0].values.tolist()
# ents_w_desc1 = set(ents_w_desc1_list)
# ents_w_desc2_list = e_desc2.iloc[:, 0].values.tolist()
# ents_w_desc2 = set(ents_w_desc2_list)
# print("after alignment, kg1 entities having descriptions:", len(ents_w_desc1))
# print("after alignment, kg2 entities having descriptions:", len(ents_w_desc2))
# prepare desc
e_desc1.iloc[:, 2] = e_desc1.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.split(' ')
e_desc2.iloc[:, 2] = e_desc2.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.split(' ')
"""
155791 [<NAME>, 2003]
155801 [Plattspitzen, <NAME>, Jubiläumsgrat]
"""
name_triples = self._get_local_name_by_name_triple()
names = pd.DataFrame(name_triples)
names.iloc[:, 2] = names.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.split(' ')
names.iloc[e_desc1.iloc[:, 0].values, [1, 2]] = e_desc1.iloc[:, [1, 2]].values
names.iloc[e_desc2.iloc[:, 0].values, [1, 2]] = e_desc2.iloc[:, [1, 2]].values
"""
29998 29998 -1 [Til, Death]
29999 29999 -1 [You, Gotta, Fight, for, Your, Right, to, Party]
"""
# load word embedding
with open(self.word_embed, 'r') as f:
w = f.readlines()
w = pd.Series(w[1:])
we = w.str.split(' ')
word = we.apply(lambda x: x[0])
w_em = we.apply(lambda x: x[1:])
print('concat word embeddings')
word_em = np.stack(w_em.values, axis=0).astype(np.float)
word_em = np.append(word_em, np.zeros([1, 300]), axis=0)
print('convert words to ids')
w_in_desc = []
for l in names.iloc[:, 2].values:
w_in_desc += l
w_in_desc = pd.Series(list(set(w_in_desc)))
un_logged_words = w_in_desc[~w_in_desc.isin(word)]
un_logged_id = len(word)
all_word = pd.concat(
[pd.Series(word.index, word.values),
pd.Series([un_logged_id, ] * len(un_logged_words), index=un_logged_words)])
def lookup_and_padding(x):
default_length = 4
ids = list(all_word.loc[x].values) + [all_word.iloc[-1], ] * default_length
return ids[:default_length]
print('look up desc embeddings')
names.iloc[:, 2] = names.iloc[:, 2].apply(lookup_and_padding)
# entity-desc-embedding dataframe
e_desc_input = pd.DataFrame(np.repeat([[un_logged_id, ] * 4], model.kgs.entities_num, axis=0),
range(model.kgs.entities_num))
e_desc_input.iloc[names.iloc[:, 0].values] = np.stack(names.iloc[:, 2].values)
print('generating desc input costs time: {:.4f}s'.format(time.time() - start))
return word_em, e_desc_input
def _get_local_name_by_name_triple(self, name_attribute_list=None):
if name_attribute_list is None:
if 'D_Y' in self.args.training_data:
name_attribute_list = {'skos:prefLabel', 'http://dbpedia.org/ontology/birthName'}
elif 'D_W' in self.args.training_data:
name_attribute_list = {'http://www.wikidata.org/entity/P373', 'http://www.wikidata.org/entity/P1476'}
else:
name_attribute_list = {}
local_triples = self.kgs.kg1.local_attribute_triples_set | self.kgs.kg2.local_attribute_triples_set
triples = list()
for h, a, v in local_triples:
v = v.strip('"')
if v.endswith('"@eng'):
v = v.rstrip('"@eng')
triples.append((h, a, v))
id_ent_dict = {}
for e, e_id in self.kgs.kg1.entities_id_dict.items():
id_ent_dict[e_id] = e
for e, e_id in self.kgs.kg2.entities_id_dict.items():
id_ent_dict[e_id] = e
name_ids = set()
for a, a_id in self.kgs.kg1.attributes_id_dict.items():
if a in name_attribute_list:
name_ids.add(a_id)
for a, a_id in self.kgs.kg2.attributes_id_dict.items():
if a in name_attribute_list:
name_ids.add(a_id)
for a, a_id in self.kgs.kg1.attributes_id_dict.items():
if a_id in name_ids:
print(a)
for a, a_id in self.kgs.kg2.attributes_id_dict.items():
if a_id in name_ids:
print(a)
local_name_dict = {}
ents = self.kgs.kg1.entities_set | self.kgs.kg2.entities_set
for (e, a, v) in triples:
if a in name_ids:
local_name_dict[e] = v
for e in ents:
if e not in local_name_dict:
local_name_dict[e] = id_ent_dict[e].split('/')[-1].replace('_', ' ')
name_triples = list()
for e, n in local_name_dict.items():
name_triples.append((e, -1, n))
return name_triples
def _define_variables(self):
with tf.variable_scope('relational' + 'embeddings'):
self.ent_embeds = init_embeddings([self.kgs.entities_num, self.args.dim], 'ent_embeds',
self.args.init, self.args.ent_l2_norm)
self.rel_embeds = init_embeddings([self.kgs.relations_num, self.args.dim], 'rel_embeds',
self.args.init, self.args.rel_l2_norm)
def _define_embed_graph(self):
with tf.name_scope('triple_placeholder'):
self.pos_hs = tf.placeholder(tf.int32, shape=[None])
self.pos_rs = tf.placeholder(tf.int32, shape=[None])
self.pos_ts = tf.placeholder(tf.int32, shape=[None])
self.neg_hs = tf.placeholder(tf.int32, shape=[None])
self.neg_rs = tf.placeholder(tf.int32, shape=[None])
self.neg_ts = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('triple_lookup'):
phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
with tf.name_scope('triple_loss'):
self.triple_loss = margin_loss(phs, prs, pts, nhs, nrs, nts, self.args.margin, self.args.loss_norm)
self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_desc_graph(self):
with tf.variable_scope('desc'):
self.desc1 = AM_desc1_batch = tf.placeholder(dtype=tf.float32,
shape=[None, self.default_desc_length, self.wv_dim],
name='desc1')
self.desc2 = AM_desc2_batch = tf.placeholder(dtype=tf.float32,
shape=[None, self.default_desc_length, self.wv_dim],
name='desc2')
gru_1 = tf.contrib.keras.layers.GRU(units=self.wv_dim, return_sequences=True)
gru_5 = tf.contrib.keras.layers.GRU(units=self.wv_dim, return_sequences=True)
conv1 = tf.contrib.keras.layers.Conv1D(filters=self.wv_dim, kernel_size=3, strides=1, activation=tf.tanh,
padding='valid', use_bias=True)
ds3 = tf.contrib.keras.layers.Dense(units=self.wv_dim, activation=tf.tanh, use_bias=True)
self._att1 = att1 = tf.contrib.keras.layers.Dense(units=1, activation='tanh', use_bias=True)
self._att3 = att3 = tf.contrib.keras.layers.Dense(units=1, activation='tanh', use_bias=True)
# gru_+att1
mp1_b = conv1(gru_1(AM_desc1_batch))
mp2_b = conv1(gru_1(AM_desc2_batch))
att1_w = tf.contrib.keras.activations.softmax(att1(mp1_b), axis=-2)
att2_w = tf.contrib.keras.activations.softmax(att1(mp2_b), axis=-2)
size1 = self.default_desc_length
mp1_b = tf.multiply(mp1_b, tf.scalar_mul(size1, att1_w))
mp2_b = tf.multiply(mp2_b, tf.scalar_mul(size1, att2_w))
# gru_+at3
mp1_b = gru_5(mp1_b)
mp2_b = gru_5(mp2_b)
att1_w = tf.contrib.keras.activations.softmax(att3(mp1_b), axis=-2)
att2_w = tf.contrib.keras.activations.softmax(att3(mp2_b), axis=-2)
mp1_b = tf.multiply(mp1_b, att1_w)
mp2_b = tf.multiply(mp2_b, att2_w)
# last ds
ds1_b = tf.reduce_sum(mp1_b, 1)
ds2_b = tf.reduce_sum(mp2_b, 1)
eb_desc_batch1 = tf.nn.l2_normalize(ds3(ds1_b), dim=1)
eb_desc_batch2 = tf.nn.l2_normalize(ds3(ds2_b), dim=1) # tf.nn.l2_normalize(DS4(ds2_b), dim=1)
indicator = np.empty((self.desc_batch_size, self.desc_batch_size), dtype=np.float32)
indicator.fill(self.negative_indication_weight)
np.fill_diagonal(indicator, 1.)
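# indicator acts as the supervision matrix for description matching: diagonal
# entries (1.0) mark aligned description pairs while off-diagonal entries carry
# negative_indication_weight for mismatched pairs; it is presumably consumed by
# the description-matching loss defined further on (not shown here).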
#!/usr/bin/env python
# Copyright (c) 2016 <NAME> <<EMAIL>>
# All rights reserved. No warranty, explicit or implicit, provided.
import os
import cv2
import numpy as np
from path import Path as path
import math
import random
import dirtools
import Easy_Image as efr
# Read points from text files in directory
def readPoints(path) :
# Create an array of array of points.
pointsArray = [];
#List all files in the directory and read points from text files one by one
for filePath in sorted(os.listdir(path)):
if filePath.endswith(".txt"):
#Create an array of points.
points = [];
# Read points from filePath
with open(os.path.join(path, filePath)) as file :
for line in file :
x, y = line.split()
points.append((int(x), int(y)))
# Store array of points
pointsArray.append(points)
return pointsArray;
# Read all jpg images in folder.
def readImages(path) :
#Create array of array of images.
imagesArray = [];
#List all files in the directory and read points from text files one by one
for filePath in sorted(os.listdir(path)):
if filePath.endswith(".jpg"):
# Read image found.
img = cv2.imread(os.path.join(path,filePath));
# Convert to floating point
img = np.float32(img)/255.0;
# Add to array of images
imagesArray.append(img);
return imagesArray;
# Compute similarity transform given two sets of two points.
# OpenCV requires 3 pairs of corresponding points.
# We are faking the third one.
def similarityTransform(inPoints, outPoints) :
s60 = math.sin(60*math.pi/180);
c60 = math.cos(60*math.pi/180);
inPts = np.copy(inPoints).tolist();
outPts = np.copy(outPoints).tolist();
xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0];
yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1];
inPts.append([int(xin), int(yin)]);
xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0];
yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1];
outPts.append([int(xout), int(yout)]);
tform = cv2.estimateRigidTransform(np.array([inPts]), np.array([outPts]), False);
return tform;
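# The third correspondence is fabricated by rotating the segment between the two
# given points by 60 degrees (the s60/c60 terms above), i.e. completing an
# equilateral triangle, because cv2.estimateRigidTransform needs three point
# pairs to estimate the similarity transform.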
# Check if a point is inside a rectangle
def rectContains(rect, point) :
if point[0] < rect[0] :
return False
elif point[1] < rect[1] :
return False
elif point[0] > rect[2] :
return False
elif point[1] > rect[3] :
return False
return True
# Calculate delanauy triangle
def calculateDelaunayTriangles(rect, points):
# Create subdiv
subdiv = cv2.Subdiv2D(rect);
# Insert points into subdiv
for p in points:
subdiv.insert((p[0], p[1]));
# List of triangles. Each triangle is a list of 3 points ( 6 numbers )
triangleList = subdiv.getTriangleList();
# Find the indices of triangles in the points array
delaunayTri = []
for t in triangleList:
pt = []
pt.append((t[0], t[1]))
pt.append((t[2], t[3]))
pt.append((t[4], t[5]))
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):
ind = []
for j in range(0, 3):
for k in range(0, len(points)):
if(abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0):
ind.append(k)
if len(ind) == 3:
delaunayTri.append((ind[0], ind[1], ind[2]))
return delaunayTri
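# Triangles returned by Subdiv2D carry raw coordinates, so each vertex is mapped
# back to its landmark index by matching against the input points with a
# 1-pixel tolerance; triangles falling outside the bounding rectangle are
# discarded.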
def constrainPoint(p, w, h) :
p = ( min( max( p[0], 0 ) , w - 1 ) , min( max( p[1], 0 ) , h - 1 ) )
return p;
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def applyAffineTransform(src, srcTri, dstTri, size) :
# Given a pair of triangles, find the affine transform.
warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
#!/usr/bin/env python
u"""
MPI_ICESat2_ATL03.py (05/2021)
Read ICESat-2 ATL03 and ATL09 data files to calculate average segment surfaces
ATL03 datasets: Global Geolocated Photons
ATL09 datasets: Atmospheric Characteristics
CALLING SEQUENCE:
mpiexec -np 6 python MPI_ICESat2_ATL03.py ATL03_file ATL09_file
COMMAND LINE OPTIONS:
-O X, --output X: Name and path of output file
-V, --verbose: Verbose output to track progress
-M X, --mode X: Permission mode of files created
REQUIRES MPI PROGRAM
MPI: standardized and portable message-passing system
https://www.open-mpi.org/
http://mpitutorial.com/
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
mpi4py: MPI for Python
http://pythonhosted.org/mpi4py/
http://mpi4py.readthedocs.org/en/stable/
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
https://h5py.org
http://docs.h5py.org/en/stable/mpi.html
scikit-learn: Machine Learning in Python
http://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
PROGRAM DEPENDENCIES:
convert_delta_time.py: converts from delta time into Julian and year-decimal
fit.py: Utilities for calculating fits from ATL03 Geolocated Photon Data
time.py: Utilities for calculating time operations
utilities.py: download and management utilities for syncing files
classify_photons.py: Yet Another Photon Classifier for Geolocated Photon Data
UPDATE HISTORY:
Updated 05/2021: add photon classifier based on GSFC YAPC algorithms
move surface fit operations into separate module
Updated 02/2021: replaced numpy bool/int to prevent deprecation warnings
Updated 01/2021: time utilities for converting times from JD and to decimal
Updated 12/2020: H5py deprecation warning change to use make_scale
Updated 10/2020: using argparse to set parameters
Updated 09/2020: using reference photon delta time to interpolate ATL09
Updated 08/2020: using convert delta time function to convert to Julian days
Updated 07/2020: "re-tiding" is no longer necessary
Updated 06/2020: verify that complementary beam pair is in list of beams
set masks of output arrays after reading from HDF5
add additional beam check within heights groups
Updated 10/2019: changing Y/N flags to True/False
Updated 09/2019: adding segment quality summary variable
Updated 04/2019: updated backup algorithm for when the surface fit fails
estimate both mean and median first photon bias corrections
estimate both mean and median transmit pulse shape corrections
Updated 03/2019: extract a set of ATL09 parameters for each ATL03 segment_ID
Updated 02/2019: procedures following ATBD for first ATL03 release
Written 05/2017
"""
from __future__ import print_function, division
import sys
import os
import re
import h5py
import argparse
import datetime
import numpy as np
import scipy.signal
import scipy.interpolate
import sklearn.neighbors
import sklearn.cluster
from mpi4py import MPI
import icesat2_toolkit.fit
import icesat2_toolkit.time
from icesat2_toolkit.convert_delta_time import convert_delta_time
from yapc.classify_photons import classify_photons
#-- PURPOSE: keep track of MPI threads
def info(rank, size):
print('Rank {0:d} of {1:d}'.format(rank+1,size))
print('module name: {0}'.format(__name__))
if hasattr(os, 'getppid'):
print('parent process: {0:d}'.format(os.getppid()))
print('process id: {0:d}'.format(os.getpid()))
#-- PURPOSE: reads ICESat-2 ATL03 and ATL09 HDF5 files
#-- and computes average heights over segments
def main():
#-- start MPI communicator
comm = MPI.COMM_WORLD
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Read ICESat-2 ATL03 and ATL09 data files to calculate
average segment surfaces
"""
)
#-- command line parameters
#-- first file listed contains the ATL03 file
#-- second file listed is the associated ATL09 file
parser.add_argument('ATL03',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
help='ICESat-2 ATL03 file to run')
parser.add_argument('ATL09',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
help='ICESat-2 ATL09 file to run')
#-- use default output file name
parser.add_argument('--output','-O',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
help='Name and path of output file')
#-- verbosity settings
#-- verbose will output information about each output file
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Verbose output of run')
#-- permissions mode of the local files (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='permissions mode of output files')
args = parser.parse_args()
#-- output module information for process
if args.verbose:
info(comm.rank,comm.size)
if args.verbose and (comm.rank==0):
print('{0} -->'.format(args.ATL03))
#-- directory setup
ATL03_dir = os.path.dirname(args.ATL03)
#-- compile regular expression operator for extracting data from ATL03 files
rx1 = re.compile(r'(processed)?(ATL\d+)_(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})'
r'(\d{2})_(\d{4})(\d{2})(\d{2})_(\d{3})_(\d{2})(.*?).h5$')
#-- universal variables
#-- speed of light
c = 299792458.0
#-- associated beam pairs
associated_beam_pair = dict(gt1l='gt1r',gt1r='gt1l',gt2l='gt2r',gt2r='gt2l',
gt3l='gt3r',gt3r='gt3l')
#-- read ICESat-2 ATL03 HDF5 files (extract base parameters)
SUB,PRD,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX=rx1.findall(args.ATL03).pop()
#-- Open the HDF5 file for reading
fileID = h5py.File(args.ATL03, 'r', driver='mpio', comm=comm)
#-- read each input beam within the file
IS2_atl03_beams = []
for gtx in [k for k in fileID.keys() if bool(re.match(r'gt\d[lr]',k))]:
#-- check if subsetted beam contains data
#-- check in both the geolocation and heights groups
try:
fileID[gtx]['geolocation']['segment_id']
fileID[gtx]['heights']['delta_time']
except KeyError:
pass
else:
IS2_atl03_beams.append(gtx)
#-- number of GPS seconds between the GPS epoch
#-- and ATLAS Standard Data Product (SDP) epoch
atlas_sdp_gps_epoch = fileID['ancillary_data']['atlas_sdp_gps_epoch'][:]
#-- which TEP to use for a given spot (convert to 0-based index)
tep_valid_spot = fileID['ancillary_data']['tep']['tep_valid_spot'][:] - 1
tep_pce = ['pce1_spot1','pce2_spot3']
#-- valid range of times for each TEP histogram
tep_range_prim = fileID['ancillary_data']['tep']['tep_range_prim'][:]
#-- save tep parameters for a given beam
tep = {}
#-- variables of interest for generating corrected elevation estimates
Segment_ID = {}
Segment_Index_begin = {}
Segment_PE_count = {}
Segment_Distance = {}
Segment_Length = {}
Segment_Background = {}
#-- fit parameters
Segment_delta_time = {}
Segment_Height = {}
Segment_Land_Ice = {}
Segment_dH_along = {}
Segment_dH_across = {}
Segment_Height_Error = {}
Segment_Land_Ice_Error = {}
Segment_dH_along_Error = {}
Segment_dH_across_Error = {}
Segment_Mean_Median = {}
Segment_X_atc = {}
Segment_X_spread = {}
Segment_Y_atc = {}
Segment_sigma_geo = {}
Segment_Longitude = {}
Segment_Latitude = {}
Segment_N_Fit = {}
Segment_Window = {}
Segment_RDE = {}
Segment_SNR = {}
Segment_Photon_SNR = {}
Segment_Summary = {}
Segment_Iterations = {}
Segment_Clusters = {}
Segment_Source = {}
Segment_Pulses = {}
#-- correction parameters
FPB_mean_corr = {}
FPB_mean_sigma = {}
FPB_median_corr = {}
FPB_median_sigma = {}
mean_dead_time = {}
FPB_n_corr = {}
FPB_cal_corr = {}
TPS_mean_corr = {}
TPS_median_corr = {}
#-- for each input beam within the file
for gtx in sorted(IS2_atl03_beams):
print(gtx) if args.verbose and (comm.rank == 0) else None
#-- beam type (weak versus strong) for time
atlas_beam_type = fileID[gtx].attrs['atlas_beam_type'].decode('utf-8')
n_pixels = 16.0 if (atlas_beam_type == "strong") else 4.0
#-- ATL03 Segment ID
Segment_ID[gtx] = fileID[gtx]['geolocation']['segment_id'][:]
#-- number of valid overlapping ATL03 segments
n_seg = len(Segment_ID[gtx]) - 1
#-- number of photon events
n_pe, = fileID[gtx]['heights']['delta_time'].shape
#-- first photon in the segment (convert to 0-based indexing)
Segment_Index_begin[gtx] = fileID[gtx]['geolocation']['ph_index_beg'][:] - 1
#-- number of photon events in the segment
Segment_PE_count[gtx] = fileID[gtx]['geolocation']['segment_ph_cnt'][:]
#-- along-track distance for each ATL03 segment
Segment_Distance[gtx] = fileID[gtx]['geolocation']['segment_dist_x'][:]
#-- along-track length for each ATL03 segment
Segment_Length[gtx] = fileID[gtx]['geolocation']['segment_length'][:]
#-- ocean tide
fv = fileID[gtx]['geophys_corr']['tide_ocean'].attrs['_FillValue']
tide_ocean = np.ma.array(fileID[gtx]['geophys_corr']['tide_ocean'][:],
fill_value=fv)
tide_ocean.mask = tide_ocean.data == tide_ocean.fill_value
#-- interpolate background photon rate based on 50-shot summation
background_delta_time = fileID[gtx]['bckgrd_atlas']['delta_time'][:]
SPL = scipy.interpolate.UnivariateSpline(background_delta_time,
fileID[gtx]['bckgrd_atlas']['bckgrd_rate'][:],k=3,s=0)
Segment_Background[gtx] = SPL(fileID[gtx]['geolocation']['delta_time'][:])
#-- ATLAS spot number for beam in current orientation
spot = int(fileID[gtx].attrs['atlas_spot_number'])
#-- get ATLAS impulse response variables for the transmitter echo path (TEP)
tep1,tep2 = ('atlas_impulse_response','tep_histogram')
#-- get appropriate transmitter-echo-path histogram for spot
associated_pce = tep_valid_spot[spot-1]
pce = tep_pce[associated_pce]
#-- delta time of TEP histogram
tep_tod, = fileID[tep1][pce][tep2]['tep_tod'][:]
#-- truncate tep to primary histogram (reflection 43-50 ns)
#-- and extract signal tep from noise tep. calculate width of tep
#-- ATL03 recommends subsetting between 15-30 ns to avoid secondary
tep_hist_time = np.copy(fileID[tep1][pce][tep2]['tep_hist_time'][:])
tep_hist = np.copy(fileID[tep1][pce][tep2]['tep_hist'][:])
t_TX,p_TX,W_TX,FWHM,TXs,TXe = icesat2_toolkit.fit.extract_tep_histogram(
tep_hist_time, tep_hist, tep_range_prim)
#-- save tep information and statistics
tep[gtx] = {}
tep[gtx]['pce'] = pce
tep[gtx]['tep_tod'] = tep_tod
tep[gtx]['tx_start'] = TXs
tep[gtx]['tx_end'] = TXe
tep[gtx]['tx_robust_sprd'] = W_TX
tep[gtx]['sigma_tx'] = FWHM
#-- channel dead time and first photon bias table for beam
cal1,cal2 = ('ancillary_data','calibrations')
channel_dead_time = fileID[cal1][cal2]['dead_time'][gtx]['dead_time'][:]
mean_dead_time[gtx] = np.mean(channel_dead_time)
fpb_dead_time = fileID[cal1][cal2]['first_photon_bias'][gtx]['dead_time'][:]
fpb_strength = fileID[cal1][cal2]['first_photon_bias'][gtx]['strength'][:]
fpb_width = fileID[cal1][cal2]['first_photon_bias'][gtx]['width'][:]
fpb_corr = fileID[cal1][cal2]['first_photon_bias'][gtx]['ffb_corr'][:]
#-- calculate first photon bias as a function of strength and width
#-- for the calculated mean dead time of the beam
ndt,ns,nw = np.shape(fpb_corr)
fpb_corr_dead_time = np.zeros((ns,nw))
for s in range(ns):
for w in range(nw):
SPL = scipy.interpolate.UnivariateSpline(fpb_dead_time/1e9,
fpb_corr[:,s,w],k=3,s=0)
fpb_corr_dead_time[s,w] = SPL(mean_dead_time[gtx])
#-- bivariate spline for estimating first-photon bias using CAL-19
CAL19 = scipy.interpolate.RectBivariateSpline(fpb_strength[0,:],
fpb_width[0,:]/1e9, fpb_corr_dead_time/1e12, kx=1, ky=1)
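#-- CAL19 provides the first-photon bias correction as a smooth function of
#-- signal strength and window width, pre-interpolated above to this beam's
#-- mean channel dead time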
#-- allocate for output segment fit data
fill_value = fileID[gtx]['geolocation']['sigma_h'].attrs['_FillValue']
#-- delta time of fit photons
Distributed_delta_time = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_delta_time.mask = np.ones((n_seg),dtype=bool)
#-- segment fit heights
Distributed_Height = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Height.mask = np.ones((n_seg),dtype=bool)
#-- land ice height corrected for first photon bias and transmit-pulse shape
Distributed_Land_Ice = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Land_Ice.mask = np.ones((n_seg),dtype=bool)
#-- segment fit along-track slopes
Distributed_dH_along = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_dH_along.mask = np.ones((n_seg),dtype=bool)
#-- segment fit height errors
Distributed_Height_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Height_Error.mask = np.ones((n_seg),dtype=bool)
#-- land ice height errors (max of fit or first photon bias uncertainties)
Distributed_Land_Ice_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Land_Ice_Error.mask = np.ones((n_seg),dtype=bool)
#-- segment fit along-track slope errors
Distributed_dH_along_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_dH_along_Error.mask = np.ones((n_seg),dtype=bool)
#-- difference between the mean and median of the residuals from fit height
Distributed_Mean_Median = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Mean_Median.mask = np.ones((n_seg),dtype=bool)
#-- along-track X coordinates of segment fit
Distributed_X_atc = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_X_atc.mask = np.ones((n_seg),dtype=bool)
#-- along-track X coordinate spread of points used in segment fit
Distributed_X_spread = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_X_spread.mask = np.ones((n_seg),dtype=bool)
#-- along-track Y coordinates of segment fit
Distributed_Y_atc = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Y_atc.mask = np.ones((n_seg),dtype=bool)
#-- longitude of fit photons
Distributed_Longitude = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Longitude.mask = np.ones((n_seg),dtype=bool)
#-- latitude of fit photons
Distributed_Latitude = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Latitude.mask = np.ones((n_seg),dtype=bool)
#-- number of photons in fit
Distributed_N_Fit = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_N_Fit.mask = np.ones((n_seg),dtype=bool)
#-- size of the window used in the fit
Distributed_Window = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Window.mask = np.ones((n_seg),dtype=bool)
#-- robust dispersion estimator
Distributed_RDE = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_RDE.mask = np.ones((n_seg),dtype=bool)
#-- signal-to-noise ratio
Distributed_SNR = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_SNR.mask = np.ones((n_seg),dtype=bool)
#-- maximum signal-to-noise ratio from photon classifier
Distributed_Photon_SNR = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Distributed_Photon_SNR.mask = np.ones((n_seg),dtype=bool)
#-- segment quality summary
Distributed_Summary = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Summary.mask = np.ones((n_seg),dtype=bool)
#-- number of iterations for fit
Distributed_Iterations = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Iterations.mask = np.ones((n_seg),dtype=bool)
#-- number of estimated clusters of data
Distributed_Clusters = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Distributed_Clusters.mask = np.ones((n_seg),dtype=bool)
#-- signal source selection
Distributed_Source = np.ma.zeros((n_seg),fill_value=4,dtype=int)
Distributed_Source.mask = np.ones((n_seg),dtype=bool)
#-- number of pulses in segment
Distributed_Pulses = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Pulses.mask = np.ones((n_seg),dtype=bool)
#-- first photon bias estimates
Distributed_FPB_mean_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_mean_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_mean_sigma = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_mean_sigma.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_median_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_median_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_median_sigma = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_median_sigma.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_n_corr = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_FPB_n_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_cal_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_cal_corr.mask = np.ones((n_seg),dtype=bool)
#-- transmit pulse shape bias estimates
Distributed_TPS_mean_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_TPS_mean_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_TPS_median_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_TPS_median_corr.mask = np.ones((n_seg),dtype=bool)
#-- along-track and across-track distance for photon events
x_atc = fileID[gtx]['heights']['dist_ph_along'][:].copy()
y_atc = fileID[gtx]['heights']['dist_ph_across'][:].copy()
#-- photon event heights
h_ph = fileID[gtx]['heights']['h_ph'][:].copy()
#-- for each 20m segment
for j,_ in enumerate(Segment_ID[gtx]):
#-- index for 20m segment j
idx = Segment_Index_begin[gtx][j]
#-- skip segments with no photon events
if (idx < 0):
continue
#-- number of photons in 20m segment
cnt = Segment_PE_count[gtx][j]
#-- add segment distance to along-track coordinates
x_atc[idx:idx+cnt] += Segment_Distance[gtx][j]
#-- iterate over ATLAS major frames
photon_mframes = fileID[gtx]['heights']['pce_mframe_cnt'][:].copy()
pce_mframe_cnt = fileID[gtx]['bckgrd_atlas']['pce_mframe_cnt'][:].copy()
unique_major_frames,unique_index = np.unique(pce_mframe_cnt,return_index=True)
major_frame_count = len(unique_major_frames)
tlm_height_band1 = fileID[gtx]['bckgrd_atlas']['tlm_height_band1'][:].copy()
tlm_height_band2 = fileID[gtx]['bckgrd_atlas']['tlm_height_band2'][:].copy()
#-- photon event weights
Distributed_Weights = np.zeros((n_pe),dtype=np.float64)
#-- run for each major frame (distributed over comm.size # of processes)
for iteration in range(comm.rank, major_frame_count, comm.size):
#-- background atlas index for iteration
idx = unique_index[iteration]
#-- sum of 2 telemetry band widths for major frame
h_win_width = tlm_height_band1[idx] + tlm_height_band2[idx]
#-- photon indices for major frame (buffered by 1 on each side)
i1, = np.nonzero((photon_mframes >= unique_major_frames[iteration]-1) &
(photon_mframes <= unique_major_frames[iteration]+1))
#-- indices for the major frame within the buffered window
i2, = np.nonzero(photon_mframes[i1] == unique_major_frames[iteration])
#-- calculate photon event weights
Distributed_Weights[i1[i2]] = classify_photons(x_atc[i1], h_ph[i1],
h_win_width, i2, K=5, MIN_PH=5, MIN_XSPREAD=1.0,
MIN_HSPREAD=0.01, METHOD='linear')
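#-- note: the +/-1 major-frame buffer in i1 lets photons from adjacent frames
#-- inform the classifier near frame boundaries, but weights are only written
#-- back for photons belonging to the current major frame (the i2 subset)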
#-- photon event weights
pe_weights = np.zeros((n_pe),dtype=np.float64)
comm.Allreduce(sendbuf=[Distributed_Weights, MPI.DOUBLE], \
recvbuf=[pe_weights, MPI.DOUBLE], op=MPI.SUM)
Distributed_Weights = None
#-- wait for all distributed processes to finish for beam
comm.Barrier()
#-- iterate over valid ATL03 segments
#-- in ATL03 1-based indexing: invalid == 0
#-- here in 0-based indexing: invalid == -1
segment_indices, = np.nonzero((Segment_Index_begin[gtx][:-1] >= 0) &
(Segment_Index_begin[gtx][1:] >= 0))
iteration_count = len(segment_indices)
#-- run for each geoseg (distributed over comm.size # of processes)
for iteration in range(comm.rank, iteration_count, comm.size):
#-- indice for iteration (can run through a subset of segments)
j = segment_indices[iteration]
#-- iterate over valid ATL03 segments
#-- in ATL03 1-based indexing: invalid == 0
#-- here in 0-based indexing: invalid == -1
if (Segment_Index_begin[gtx][j] >= 0):
#-- index for segment j
idx = Segment_Index_begin[gtx][j]
#-- number of photons in segment (use 2 ATL03 segments)
c1 = int(Segment_PE_count[gtx][j])
c2 = int(Segment_PE_count[gtx][j+1])
cnt = c1 + c2
#-- time of each Photon event (PE)
segment_times = np.copy(fileID[gtx]['heights']['delta_time'][idx:idx+cnt])
#-- Photon event lat/lon and elevation (re-tided WGS84)
segment_heights = np.copy(h_ph[idx:idx+cnt])
#-- ATL03 pe heights no longer apply the ocean tide
#-- and so "re-tiding" is no longer necessary
# segment_heights[:c1] += tide_ocean[j]
# segment_heights[c1:] += tide_ocean[j+1]
segment_lats = np.copy(fileID[gtx]['heights']['lat_ph'][idx:idx+cnt])
segment_lons = np.copy(fileID[gtx]['heights']['lon_ph'][idx:idx+cnt])
#-- Photon event channel and identification
ID_channel = np.copy(fileID[gtx]['heights']['ph_id_channel'][idx:idx+cnt])
ID_pulse = np.copy(fileID[gtx]['heights']['ph_id_pulse'][idx:idx+cnt])
n_pulses = np.unique(ID_pulse).__len__()
frame_number = np.copy(fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx+cnt])
#-- vertical noise-photon density
background_density = 2.0*n_pulses*Segment_Background[gtx][j]/c
#-- along-track X and Y coordinates
distance_along_X = np.copy(x_atc[idx:idx+cnt])
distance_along_Y = np.copy(y_atc[idx:idx+cnt])
#-- check the spread of photons along-track (must be > 20m)
along_X_spread = distance_along_X.max() - distance_along_X.min()
#-- check confidence level associated with each photon event
#-- -2: TEP
#-- -1: Events not associated with a specific surface type
#-- 0: noise
#-- 1: buffer but algorithm classifies as background
#-- 2: low
#-- 3: medium
#-- 4: high
#-- Surface types for signal classification confidence
#-- 0=Land; 1=Ocean; 2=SeaIce; 3=LandIce; 4=InlandWater
ice_sig_conf = np.copy(fileID[gtx]['heights']['signal_conf_ph'][idx:idx+cnt,3])
ice_sig_low_count = np.count_nonzero(ice_sig_conf > 1)
#-- indices of TEP classified photons
ice_sig_tep_pe, = np.nonzero(ice_sig_conf == -2)
#-- photon event weights from photon classifier
segment_weights = pe_weights[idx:idx+cnt]
snr_norm = np.max(segment_weights)
#-- photon event signal-to-noise ratio from photon classifier
photon_snr = np.array(100.0*segment_weights/snr_norm,dtype=int)
Distributed_Photon_SNR.data[j] = np.copy(snr_norm)
Distributed_Photon_SNR.mask[j] = (snr_norm > 0)
#-- photon confidence levels from classifier
pe_sig_conf = np.zeros((cnt),dtype=int)
#-- calculate confidence levels from photon classifier
pe_sig_conf[photon_snr >= 25] = 2
pe_sig_conf[photon_snr >= 60] = 3
pe_sig_conf[photon_snr >= 80] = 4
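#-- these thresholds map the classifier SNR percentage onto ATL03-like
#-- confidence levels: >=25 low (2), >=60 medium (3), >=80 high (4);
#-- photons below 25 remain classified as background (0)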
#-- copy classification for TEP photons
pe_sig_conf[ice_sig_tep_pe] = -2
pe_sig_low_count = np.count_nonzero(pe_sig_conf > 1)
#-- check if segment has photon events classified for land ice
#-- that are at or above low-confidence threshold
#-- and that the spread of photons is greater than 20m
if (pe_sig_low_count > 10) & (along_X_spread > 20):
#-- use density-based spatial clustering in segment
db = sklearn.cluster.DBSCAN(eps=0.5).fit(
np.c_[distance_along_X, segment_heights],
sample_weight=photon_snr)
labels = db.labels_
#-- number of noise photons
noise_photons = list(labels).count(-1)
noise_cluster = 1 if noise_photons else 0
#-- number of photon event clusters in segment
n_clusters = len(set(labels)) - noise_cluster
Distributed_Clusters.data[j] = n_clusters
Distributed_Clusters.mask[j] = (n_clusters > 0)
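#-- DBSCAN marks noise photons with the label -1; the cluster count above
#-- excludes that noise label, and weighting samples by the classifier SNR
#-- lets signal photons dominate the density estimate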
#-- perform a surface fit procedure
Segment_X = Segment_Distance[gtx][j] + Segment_Length[gtx][j]
valid,fit,centroid = icesat2_toolkit.fit.try_surface_fit(
distance_along_X, distance_along_Y, segment_heights,
pe_sig_conf, Segment_X, SURF_TYPE='linear', ITERATE=20,
CONFIDENCE=[1,0])
#-- indices of points used in final iterated fit
ifit = fit['indices'] if valid else None
if bool(valid) & (np.abs(fit['error'][0]) < 20):
Distributed_Height.data[j] = fit['beta'][0]
Distributed_Height.mask[j] = False
Distributed_dH_along.data[j] = fit['beta'][1]
Distributed_dH_along.mask[j] = False
Distributed_Height_Error.data[j] = fit['error'][0]
Distributed_Height_Error.mask[j] = False
Distributed_dH_along_Error.data[j] = fit['error'][1]
Distributed_dH_along_Error.mask[j] = False
#-- along-track and cross-track coordinates
Distributed_X_atc.data[j] = np.copy(centroid['x'])
Distributed_X_atc.mask[j] = False
Distributed_X_spread.data[j] = np.copy(along_X_spread)
Distributed_X_spread.mask[j] = False
Distributed_Y_atc.data[j] = np.copy(centroid['y'])
import numpy as np
import cv2
import os
import pickle
import torch as t
import h5py
import pandas as pd
from NNsegmentation.models import Segment
from NNsegmentation.data import predict_whole_map
from SingleCellPatch.extract_patches import within_range
from pipeline.segmentation import instance_clustering
from SingleCellPatch.generate_trajectories import frame_matching
import matplotlib
from matplotlib import cm
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import NullLocator
import seaborn as sns
import imageio
from HiddenStateExtractor.vq_vae import VQ_VAE, CHANNEL_MAX, CHANNEL_VAR, prepare_dataset
from HiddenStateExtractor.naive_imagenet import read_file_path, DATA_ROOT
from HiddenStateExtractor.morphology_clustering import select_clean_trajecteories, Kmean_on_short_trajs
from HiddenStateExtractor.movement_clustering import save_traj
import statsmodels.api as sm
import scipy
RAW_DATA_PATH = '/mnt/comp_micro/Projects/CellVAE/Combined'
sites = ['D%d-Site_%d' % (i, j) for j in range(9) for i in range(3, 6)]
def enhance_contrast(mat, a=1.5, b=-10000):
mat2 = cv2.addWeighted(mat, a, mat, 0, b)
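# cv2.addWeighted computes mat*a + mat*0 + b, i.e. a simple linear contrast
# stretch (gain a, offset b) saturated to the image dtype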
return mat2
def plot_patch(sample_path, out_path, boundary=False, channel=0):
with h5py.File(sample_path, 'r') as f:
mat = np.array(f['masked_mat'][:, :, channel].astype('uint16'))
mask = np.array(f['masked_mat'][:, :, 2].astype('uint16'))
mat2 = enhance_contrast(mat, 1.5, -10000)
cv2.imwrite(out_path, mat2)
feat = 'save_0005_before'
fs = sorted(pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb')))
trajs = pickle.load(open('./HiddenStateExtractor/trajectory_in_inds.pkl', 'rb'))
dats_ = pickle.load(open('./HiddenStateExtractor/%s_PCA.pkl' % feat, 'rb'))
sizes = pickle.load(open(DATA_ROOT + '/Data/EncodedSizes.pkl', 'rb'))
ss = [sizes[f][0] for f in fs]
PC1_vals = dats_[:, 0]
PC1_range = (np.quantile(PC1_vals, 0.4), np.quantile(PC1_vals, 0.6))
PC2_vals = dats_[:, 1]
PC2_range = (np.quantile(PC2_vals, 0.4), np.quantile(PC2_vals, 0.6))
# PC1
vals = dats_[:, 0]
path = '/data/michaelwu/CellVAE/PC_samples/PC1'
val_std = np.std(vals)
thr0 = np.quantile(vals, 0.1)
thr1 = np.quantile(vals, 0.9)
samples0 = [f for i, f in enumerate(fs) if vals[i] < thr0]
samples1 = [f for i, f in enumerate(fs) if vals[i] > thr1]
sample_ts = []
for t in trajs:
traj_PCs = np.array([vals[ind] for ind in trajs[t]])
start = np.mean(traj_PCs[:3])
end = np.mean(traj_PCs[-3:])
traj_PC_diff = traj_PCs[1:] - traj_PCs[:-1]
if np.abs(end - start) > 1.2 * val_std and np.median(traj_PC_diff) < 0.5 * val_std:
sample_ts.append(t)
np.random.seed(123)
for i, f in enumerate(np.random.choice(samples0, (10,), replace=False)):
plot_patch(f, path + '/sample_low_%d.png' % i)
for i, f in enumerate(np.random.choice(samples1, (10,), replace=False)):
plot_patch(f, path + '/sample_high_%d.png' % i)
for t in np.random.choice(sample_ts, (10,), replace=False):
save_traj(t, path + '/sample_traj_%s.gif' % t.replace('/', '_'))
# PC2, controlling for PC1
vals = dats_[:, 1]
path = '/data/michaelwu/CellVAE/PC_samples/PC2'
vals_filtered = [v for i, v in enumerate(vals) if PC1_range[0] < PC1_vals[i] < PC1_range[1]]
val_std = np.std(vals_filtered)
thr0 = np.quantile(vals_filtered, 0.1)
thr1 = np.quantile(vals_filtered, 0.9)
samples0 = [f for i, f in enumerate(fs) if vals[i] < thr0 and PC1_range[0] < PC1_vals[i] < PC1_range[1]]
samples1 = [f for i, f in enumerate(fs) if vals[i] > thr1 and PC1_range[0] < PC1_vals[i] < PC1_range[1]]
sample_ts = []
for t in trajs:
traj_PCs = np.array([vals[ind] for ind in trajs[t]])
start = np.mean(traj_PCs[:3])
end = np.mean(traj_PCs[-3:])
traj_PC_diff = traj_PCs[1:] - traj_PCs[:-1]
if np.abs(end - start) > 1.2 * val_std and np.median(traj_PC_diff) < 0.5 * val_std:
sample_ts.append(t)
np.random.seed(123)
#!/usr/bin/env python3
import rospy
from dr_phil_hardware.vision.camera import Camera
from dr_phil_hardware.vision.lidar import Lidar
from dr_phil_hardware.vision.ray import Ray
from dr_phil_hardware.vision.localisation import localize_pixel
from dr_phil_hardware.vision.utils import invert_homog_mat
import numpy as np
from sensor_msgs.msg import CameraInfo, LaserScan
import tf
from geometry_msgs.msg import PointStamped,Point
from visualization_msgs.msg import Marker,MarkerArray
from tf2_msgs.msg import TFMessage
class HandleLocalizer:
""" Node which listens to 2d points signifying features on the camera, and localizes them and then visualises them by publising
a marker message
"""
def __init__(self,feature_topic):
self.robot_frame = "base_link"
self.lidar_frame = "base_scan"
self.camera_info_sub = rospy.Subscriber("/camera_info",CameraInfo,callback=self.camera_info_callback)
self.feature_sub = rospy.Subscriber(feature_topic,PointStamped,callback=self.feature_callback)
self.feature_pub = rospy.Publisher("/handle_feature/camera_points",MarkerArray,queue_size=10)
self.scan_sub = rospy.Subscriber("/scan_filtered",LaserScan,callback=self.scan_callback)
self.camera = None
self.point = None
self.scan = None
def scan_callback(self,scan : LaserScan):
self.scan = scan
def camera_info_callback(self,camera_info : CameraInfo):
self.camera = Camera(camera_info)
transform_listener = tf.TransformListener()
transformerRos = tf.TransformerROS()
# this will block
rospy.wait_for_message("/tf",TFMessage)
# wait for camera transform to become available
transform_listener.waitForTransform(
self.camera.get_frame_id(),
self.robot_frame,
rospy.Time.now(),
timeout=rospy.Duration(2))
# get transform
(trans,rot) = transform_listener.lookupTransform(
self.camera.get_frame_id(),
self.robot_frame,
rospy.Time.now())
try:
self.rob2cam = transformerRos.fromTranslationRotation(trans,rot)
self.camera.setup_transform(self.rob2cam)
except (tf.ConnectivityException,tf.LookupException,tf.ExtrapolationException):
rospy.logerr("Could not find camera transform!")
self.lidar = Lidar()
# wait for lidar transform to become available
transform_listener.waitForTransform(
self.lidar_frame,
self.robot_frame,
rospy.Time.now(),
timeout=rospy.Duration(2))
# get lidar transform
(trans,rot) = transform_listener.lookupTransform(
self.lidar_frame,
self.robot_frame,
rospy.Time.now())
try:
self.rob2lid = transformerRos.fromTranslationRotation(trans,rot)
self.lidar.setup_transform(self.rob2lid)
except (tf.ConnectivityException,tf.LookupException,tf.ExtrapolationException):
rospy.logerr("Could not find lidar transform!")
rospy.loginfo("Received camera info")
# get rid of subscriber
self.camera_info_sub.unregister()
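# rob2cam / rob2lid are 4x4 homogeneous transforms from the robot base frame
# into the camera and lidar frames, built once from the TF tree and handed to
# the Camera / Lidar helpers via setup_transform for later use in localisation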
def feature_callback(self,point : PointStamped):
self.point = point
def create_point_from_vec(self,vec,id):
pnt = Marker()
if vec is None:
return pnt
pnt.header.frame_id = self.robot_frame
pnt.header.stamp = rospy.Time.now()
pnt.ns = "handle_features"
pnt.id = id
pnt.type = 2 # sphere
pnt.pose.orientation.w = 1
pnt.color.a = 1
pnt.color.b = 1
pnt.action = 0 # add/modify
pnt.scale.x = 0.05
pnt.scale.y = 0.05
pnt.scale.z = 0.05
pnt.lifetime = rospy.Duration(10)
pnt.frame_locked = True
pnt.pose.position.x = vec[0,0]
pnt.pose.position.y = vec[1,0]
pnt.pose.position.z = vec[2,0]
return pnt
def create_arrow_from_ray(self,ray,id):
arw = Marker()
if ray is None:
return arw
arw.header.frame_id = self.robot_frame
arw.header.stamp = rospy.Time.now()
arw.ns = "handle_features"
arw.id = id
arw.type = 0 # arrow
p1 = Point()
orig = ray.origin
p1.x,p1.y,p1.z = (orig[0],orig[1],orig[2])
p2 = Point()
p2.x,p2.y,p2.z = (ray.get_point()[0],ray.get_point()[1],ray.get_point()[2])
arw.points = [p1,p2]
arw.pose.orientation.w = 1
arw.color.a = 1
arw.color.b = 1
arw.action = 0 # add/modify
arw.scale.x = 0.01
arw.scale.y = 0.02
arw.scale.z = 0.05
arw.lifetime = rospy.Duration(10)
arw.frame_locked = True
return arw
def visualise(self):
point_np = np.array([[self.point.point.x], [self.point.point.y]])
import contextlib
import random
import warnings
from collections import OrderedDict, defaultdict
from inspect import ismethod
from typing import Dict, List, Optional, Tuple, Union, Sequence, Callable, Any, \
Iterator, Text
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from six import string_types
from sklearn import metrics
from sklearn.mixture import GaussianMixture
from tensorflow_probability.python.distributions import Distribution, Normal, \
Bernoulli, Blockwise
from tqdm import tqdm
from typeguard import typechecked
from typing_extensions import Literal
from odin import visual as vs
from odin.backend import TensorType
from odin.bay.distributions import Batchwise, QuantizedLogistic, \
MixtureQuantizedLogistic
from odin.bay.vi._base import VariationalModel
from odin.bay.vi.autoencoder import VariationalAutoencoder
from odin.bay.vi.losses import total_correlation
from odin.bay.vi.metrics import (Correlation, beta_vae_score, dci_scores,
factor_vae_score, mutual_info_gap,
separated_attr_predictability,
correlation_matrix, mutual_info_estimate,
importance_matrix, relative_strength)
from odin.bay.vi.utils import discretizing
from odin.fuel import get_dataset, ImageDataset
from odin.ml import DimReduce, fast_kmeans
from odin.search import diagonal_linear_assignment
from odin.utils import as_tuple, uuid
__all__ = [
'GroundTruth',
'DisentanglementGym',
'Correlation',
'DimReduce',
'plot_latent_stats'
]
DataPartition = Literal['train', 'valid', 'test']
CorrelationMethod = Literal['spearman', 'pearson', 'lasso', 'mi', 'importance']
ConvertFunction = Callable[[List[Distribution]], tf.Tensor]
Axes = Union[None, plt.Axes, Sequence[int], int]
FactorFilter = Union[Callable[[Any], bool],
Dict[Union[str, int], int],
float, int, str,
None]
DatasetNames = Literal['shapes3d', 'shapes3dsmall', 'shapes3d0',
'dsprites', 'dspritessmall', 'dsprites0',
'celeba', 'celebasmall',
'fashionmnist', 'mnist',
'cifar10', 'cifar100', 'svhn',
'cortex', 'pbmc',
'halfmoons']
def _reshape2D(x: tf.Tensor) -> tf.Tensor:
if x.shape.rank == 1:
return x
return tf.reshape(x, (x.shape[0], -1))
def concat_mean(dists: List[Distribution]) -> tf.Tensor:
return tf.concat([_reshape2D(d.mean()) for d in dists], -1)
def first_mean(dists: List[Distribution]) -> tf.Tensor:
return _reshape2D(dists[0].mean())
# ===========================================================================
# Helpers
# ===========================================================================
_CACHE = defaultdict(dict)
def _dist(p: Union[Distribution, Sequence[Distribution]]
) -> Union[Sequence[Distribution], Distribution]:
"""Convert DeferredTensor back to original Distribution"""
if isinstance(p, (tuple, list)):
return [_dist(i) for i in p]
p: Distribution
return (p.parameters['distribution']
if 'deferred_tensor' in str(type(p)) else p)
def _save_image(arr, path):
from PIL import Image
if hasattr(arr, 'numpy'):
arr = arr.numpy()
im = Image.fromarray(arr)
im.save(path)
def _prepare_categorical(y: np.ndarray, ds: ImageDataset,
return_index: bool = False) -> np.ndarray:
"""Return categorical labels and factors-based label"""
if ds is None:
dsname = None
labels = None
else:
dsname = ds.name
labels = ds.labels
if hasattr(y, 'numpy'):
y = y.numpy()
if dsname is None: # unknown
y_categorical = tf.argmax(y, axis=-1)
names = np.array([f'#{i}' for i in range(y.shape[1])])
elif dsname in ('mnist', 'fashionmnist', 'cifar10', 'cifar100', 'cortex'):
y_categorical = tf.argmax(y, axis=-1)
names = labels
elif 'celeba' in dsname:
y_categorical = tf.argmax(y, axis=-1)
raise NotImplementedError
elif 'shapes3d' in dsname:
y_categorical = y[:, 2]
names = ['cube', 'cylinder', 'sphere', 'round']
elif 'shapes3d0' in dsname:
y_categorical = tf.argmax(y, -1)
names = ['cube', 'cylinder', 'sphere', 'round']
elif 'dsprites' in dsname:
y_categorical = y[:, 2]
names = ['square', 'ellipse', 'heart']
elif 'dsprites0' in dsname:
y_categorical = tf.argmax(y, -1)
names = ['square', 'ellipse', 'heart']
elif 'halfmoons' in dsname:
y_categorical = y[:, -1]
names = ['circle', 'square', 'triangle', 'pentagon']
elif 'pbmc' == dsname:
names = ['CD4', 'CD8', 'CD45RA', 'CD45RO']
y_probs = []
for x in [i for n in names for i, l in zip(y.T, labels) if n == l]:
x = x[:, np.newaxis]
gmm = GaussianMixture(n_components=2,
covariance_type='full',
n_init=2,
random_state=1)
gmm.fit(x)
y_probs.append(gmm.predict_proba(x)[:, np.argmax(gmm.means_.ravel())])
y_categorical = np.argmax(np.vstack(y_probs).T, axis=1)
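# for each protein a 2-component GMM separates low/high expression; the
# component with the larger mean is treated as the "positive" population and
# each cell is assigned to the protein with the highest positive probability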
else:
raise RuntimeError(f'No support for dataset: {dsname}')
if return_index:
return y_categorical
return np.asarray([names[int(i)] for i in y_categorical])
def _prepare_images(x, normalize=False):
"""if normalize=True, normalize the image to [0, 1], used for the
reconstructed or generated image, not the original one.
"""
x = np.asarray(x)
n_images = x.shape[0]
if normalize:
vmin = x.reshape((n_images, -1)).min(axis=1).reshape((n_images, 1, 1, 1))
vmax = x.reshape((n_images, -1)).max(axis=1).reshape((n_images, 1, 1, 1))
x = (x - vmin) / (vmax - vmin)
if x.shape[-1] == 1: # grayscale image
x = np.squeeze(x, -1)
else: # color image
x = np.transpose(x, (0, 3, 1, 2))
return x
def plot_latent_stats(mean,
stddev,
kld=None,
weights=None,
ax=None,
name='q(z|x)'):
# === 2. plotting
ax = vs.to_axis(ax)
l1 = ax.plot(mean,
label='mean',
linewidth=0.5,
marker='o',
markersize=3,
color='r',
alpha=0.5)
l2 = ax.plot(stddev,
label='stddev',
linewidth=0.5,
marker='^',
markersize=3,
color='g',
alpha=0.5)
# ax.set_ylim(-1.5, 1.5)
ax.tick_params(axis='y', colors='r')
ax.set_ylabel(f'{name} Mean', color='r')
ax.grid(True)
lines = l1 + l2
## plotting the weights
if kld is not None or weights is not None:
ax = ax.twinx()
if kld is not None:
lines += plt.plot(kld,
label='KL(q|p)',
linestyle='--',
color='y',
marker='s',
markersize=2.5,
linewidth=1.0,
alpha=0.5)
if weights is not None:
l3 = ax.plot(weights,
label='weights',
linewidth=1.0,
linestyle='--',
marker='s',
markersize=2.5,
color='b',
alpha=0.5)
ax.tick_params(axis='y', colors='b')
ax.grid(False)
ax.set_ylabel('L2-norm weights', color='b')
lines += l3
ax.legend(lines, [l.get_label() for l in lines], fontsize=8)
ax.grid(alpha=0.5)
return ax
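# Minimal usage sketch (illustrative only; assumes `qz` is a batch of
# diagonal-Gaussian posteriors from a trained VAE):
# plot_latent_stats(mean=qz.mean().numpy().mean(0),
# stddev=qz.stddev().numpy().mean(0))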
def _boostrap_sampling(
model: VariationalModel,
inputs: List[np.ndarray],
groundtruth: 'GroundTruth',
n_samples: int,
batch_size: int,
verbose: bool,
seed: int,
):
assert inputs.shape[0] == groundtruth.shape[0], \
('Number of samples mismatch between inputs and ground-truth, '
f'{inputs.shape[0]} != {groundtruth.shape[0]}')
inputs = as_tuple(inputs)
Xs = [list() for _ in range(len(inputs))] # inputs
Zs = [] # latents
Os = [] # outputs
indices = []
n = 0
random_state = np.random.RandomState(seed=seed)
prog = tqdm(desc=f'Sampling', total=n_samples, disable=not verbose)
while n < n_samples:
batch = min(batch_size, n_samples - n, groundtruth.shape[0])
if verbose:
prog.update(batch)
# factors
_, ids = groundtruth.sample_factors(n_per_factor=batch,
return_indices=True,
seed=random_state.randint(0, 1e8))
indices.append(ids)
# inputs
inps = []
for xi, inp in zip(Xs, inputs):
if tf.is_tensor(inp):
inp = tf.gather(inp, indices=ids, axis=0)
else:
inp = inp[ids]
xi.append(inp)
inps.append(inp)
# latents representation
z = model.encode(inps[0] if len(inps) == 1 else inps, training=False)
o = tf.nest.flatten(as_tuple(model.decode(z, training=False)))
# post-process latents
z = as_tuple(z)
if len(z) == 1:
z = z[0]
Os.append(o)
Zs.append(z)
# update the counter
n += len(ids)
# end progress
prog.clear()
prog.close()
# aggregate all data
Xs = [np.concatenate(x, axis=0) for x in Xs]
if isinstance(Zs[0], Distribution):
Zs = Batchwise(Zs, name="Latents")
else:
Zs = Blockwise(
[
Batchwise(
[z[zi] for z in Zs],
name=f"Latents{zi}",
) for zi in range(len(Zs[0]))
],
name="Latents",
)
Os = [
Batchwise(
[j[i] for j in Os],
name=f"Output{i}",
) for i in range(len(Os[0]))
]
indices = np.concatenate(indices, axis=0)
groundtruth = groundtruth[indices]
return Xs, groundtruth, Zs, Os, indices
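# Returns the bootstrapped inputs, the matching GroundTruth subset, the latent
# posteriors wrapped as a single Batchwise/Blockwise distribution, the decoder
# outputs (also Batchwise), and the sampled row indices.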
# ===========================================================================
# GroundTruth
# ===========================================================================
def _fast_samples_indices(known: np.ndarray, factors: np.ndarray):
outputs = [-1] * len(known)
for k_idx in range(len(known)):
for f_idx in range(len(factors)):
if np.array_equal(known[k_idx], factors[f_idx]):
if outputs[k_idx] < 0:
outputs[k_idx] = f_idx
elif bool(random.getrandbits(1)):
outputs[k_idx] = f_idx
return outputs
try:
# with numba: ~1.3 sec
# without numba: ~19.3 sec
# ~15 times faster
from numba import jit
_fast_samples_indices = jit(
_fast_samples_indices,
# target='cpu',
cache=False,
parallel=False,
nopython=True)
except ImportError:
pass
def _create_factor_filter(known: FactorFilter,
factor_names: List[str]
) -> Callable[[Any], bool]:
if callable(known):
return known
if known is None:
known = {}
if isinstance(known, dict):
known = {
factor_names.index(k) if isinstance(k, string_types) else int(k): v
for k, v in known.items()
}
return lambda x: all(x[k] == v for k, v in known.items())
else:
return lambda x: x == known
class GroundTruth:
"""Discrete factor for disentanglement analysis. If the factors is continuous,
the values are casted to `int64` For discretizing continuous factor
`odin.bay.vi.discretizing`
Parameters
----------
factors : [type]
`[num_samples, n_factors]`, an Integer array
factor_names : [type], optional
None or `[n_factors]`, list of name for each factor, by default None
categorical : Union[bool, List[bool]], optional
list of boolean indicate if the given factor is categorical values or
continuous values.
This gives significant meaning when trying to visualize
the factors, by default False.
Attributes
---------
factor_labels : list of array,
unique labels for each factor
factor_sizes : list of Integer,
number of factor for each factor
Reference
---------
Google research: https://github.com/google-research/disentanglement_lib
Raises
------
ValueError
factors must be a matrix
"""
def __init__(
self,
factors: Union[tf.Tensor, np.ndarray, tf.data.Dataset],
factor_names: Optional[Sequence[str]] = None,
categorical: Union[bool, List[bool]] = False,
n_bins: Optional[Union[int, List[int]]] = None,
strategy: Literal['uniform', 'quantile', 'kmeans', 'gmm'] = 'uniform',
):
if isinstance(factors, tf.data.Dataset):
factors = tf.stack([x for x in factors])
if tf.is_tensor(factors):
factors = factors.numpy()
factors = np.atleast_2d(factors)
if factors.ndim != 2:
raise ValueError("factors must be a matrix [n_observations, n_factor], "
f"but given shape:{factors.shape}")
# check factors is one-hot encoded
if np.all(np.sum(factors, axis=-1) == 1):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''footprints.py - <NAME> (<EMAIL>) - Sep 2018
License: MIT - see the LICENSE file for the full text.
This contains functions that get info about LC collection footprints.
'''
#############
## LOGGING ##
#############
import logging
from lccserver import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os.path
import math
import subprocess
import shutil
import numpy as np
from scipy.spatial import Delaunay
try:
from astropy.coordinates import SkyCoord
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
import matplotlib
import matplotlib.patheffects as path_effects
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scour
except ImportError:
raise ImportError(
"The following packages must be installed (via pip) "
"to use this module: "
"matplotlib>=2.0, shapely>=1.6, astropy>=3.0, and scour>=0.37"
)
from .dbsearch import sqlite_column_search
from .datasets import results_limit_rows, results_random_sample
#################
## ALPHA SHAPE ##
#################
# generating a concave hull (or "alpha shape") of RADEC coverage, using the
# Delaunay triangulation and removing triangles with too large area.
#
# originally from:
# http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/
#
def alpha_shape(points, alpha):
"""Compute the alpha shape (concave hull) of a set of points.
https://en.wikipedia.org/wiki/Alpha_shape
@param points: Iterable container of points.
@param alpha: alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
The returned things are:
a shapely.Polygon object, a list of edge points
To get a list of points making up the Polygon object, do:
>>> extcoords = np.array(concave_hull.exterior.coords)
"""
if len(points) < 4:
# When you have a triangle, there is no sense
# in computing an alpha shape.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""
Add a line between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(coords[ [i, j] ])
tri = Delaunay(points)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.simplices:
pa = points[ia]
pb = points[ib]
pc = points[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)
b = math.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)
c = math.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)
# Semiperimeter of triangle
s = (a + b + c)/2.0
# Area of triangle by Heron's formula
area = math.sqrt(s*(s-a)*(s-b)*(s-c))
circum_r = a*b*c/(4.0*area)
# radius filter: keep only triangles whose circumradius is smaller than
# 1/alpha; larger triangles are dropped, which carves the concave regions
# out of what would otherwise be the convex hull
if circum_r < 1.0/alpha:
add_edge(edges, edge_points, points, ia, ib)
add_edge(edges, edge_points, points, ib, ic)
add_edge(edges, edge_points, points, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
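# Minimal usage sketch (mirrors the callers below; ra/decl are 1-D coordinate
# arrays in degrees):
# points = np.column_stack((ra, decl))
# hull, edge_points = alpha_shape(points, alpha=0.7)
# boundary = np.array(hull.exterior.coords)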
############################################
## CONVEX HULL AND ALPHA SHAPE GENERATION ##
############################################
def collection_convex_hull(basedir,
collection,
randomsample=None,
limit=None,
conditions='(ndet > 49)',
hull_buffer=0.5):
'''This gets the convex hull for an LC collection.
conditions is a filter string to be passed into the
dbsearch.sqlite_column_search function.
'''
# get the ra/dec
res = sqlite_column_search(basedir,
getcolumns=['ra','decl'],
conditions=conditions,
lcclist=[collection])
if res and len(res[collection]['result']) > 0:
rows = res[collection]['result']
if randomsample is not None:
rows = results_random_sample(rows, sample_count=randomsample)
if limit is not None:
rows = results_limit_rows(rows,
rowlimit=limit,
incoming_userid=1,
incoming_role='superuser')
ra = np.array([x['ra'] for x in rows])
decl = np.array([x['decl'] for x in rows])
points = np.column_stack((ra, decl))
# now generate a shapely convex_hull object that we can pickle
shapely_points = geometry.MultiPoint(list(points))
shapely_convex_hull = shapely_points.convex_hull
if hull_buffer is not None:
shapely_convex_hull = shapely_convex_hull.buffer(hull_buffer)
return (
shapely_convex_hull,
np.array(shapely_convex_hull.exterior.coords)
)
else:
LOGERROR('no objects found in collection: %s with conditions: %s' %
(collection, conditions))
def collection_alpha_shape(basedir,
collection,
alpha=0.7,
randomsample=None,
limit=None,
conditions='(ndet > 49)',
hull_buffer=0.5):
'''This gets the alpha shape (concave hull) for an LC collection.
conditions is a filter string to be passed into the
dbsearch.sqlite_column_search function.
'''
# get the ra/dec
res = sqlite_column_search(basedir,
getcolumns=['ra','decl'],
conditions=conditions,
lcclist=[collection],
incoming_userid=1,
incoming_role='superuser')
if res and len(res[collection]['result']) > 0:
rows = res[collection]['result']
if randomsample is not None:
rows = results_random_sample(rows, sample_count=randomsample)
if limit is not None:
rows = results_limit_rows(rows,
rowlimit=limit,
incoming_userid=1,
incoming_role='superuser')
ra = np.array([x['ra'] for x in rows])
decl = np.array([x['decl'] for x in rows])
points = np.column_stack((ra, decl))
shapely_concave_hull, edge_points = alpha_shape(points,
alpha=alpha)
if hull_buffer is not None:
shapely_concave_hull = shapely_concave_hull.buffer(hull_buffer)
# get the coordinates of the hull
try:
hull_coords = np.array(shapely_concave_hull.exterior.coords)
except Exception:
LOGWARNING('this concave hull may have multiple '
'unconnected sections, the alpha parameter '
'might be too high. returning a shapely.MultiPolygon '
'object and list of edge coords')
hull_coords = []
if isinstance(shapely_concave_hull, MultiPolygon):
for geom in shapely_concave_hull:
hull_coords.append(np.array(geom.exterior.coords))
elif isinstance(shapely_concave_hull, Polygon):
if (shapely_concave_hull.area > 0.0 and
shapely_concave_hull.exterior):
hull_coords = np.array(shapely_concave_hull.exterior.coords)
else:
LOGERROR('the concave hull has area = 0.0, '
'alpha = %s is likely too high '
'for this object' % alpha)
return shapely_concave_hull, None
else:
LOGERROR('unknown geometry returned')
return None, None
return shapely_concave_hull, hull_coords
####################################
## COLLECTION FOOTPRINT FUNCTIONS ##
####################################
def generate_collection_footprint(
basedir,
collection,
alpha=0.7,
randomsample=None,
limit=None,
conditions='(ndet > 49)',
hull_buffer=0.5,
):
'''This generates the convex and concave hulls for a collection.
Saves them to a collection-footprint.pkl pickle in the collection's
directory.
'''
convex_hull, convex_boundary_points = collection_convex_hull(
basedir,
collection,
randomsample=randomsample,
limit=limit,
conditions=conditions,
hull_buffer=hull_buffer,
)
concave_hull, concave_boundary_points = collection_alpha_shape(
basedir,
collection,
alpha=alpha,
randomsample=randomsample,
limit=limit,
conditions=conditions,
hull_buffer=hull_buffer,
)
footprint = {
'collection': collection,
'args':{
'alpha':alpha,
'randomsample':randomsample,
'limit':limit,
'conditions':conditions,
'hull_buffer':hull_buffer
},
'convex_hull': convex_hull,
'convex_hull_boundary': convex_boundary_points,
'concave_hull': concave_hull,
'concave_hull_boundary': concave_boundary_points,
}
outpickle = os.path.join(basedir,
collection.replace('_','-'),
'catalog-footprint.pkl')
with open(outpickle, 'wb') as outfd:
pickle.dump(footprint, outfd, pickle.HIGHEST_PROTOCOL)
return outpickle
#####################################
## COLLECTION OVERVIEW PLOT MAKING ##
#####################################
def collection_overview_plot(collection_dirlist,
outfile,
use_hull='concave',
use_projection='mollweide',
use_colormap='inferno',
use_colorlist=None,
use_alpha=0.5,
show_galactic_plane=True,
show_ecliptic_plane=True,
east_is_left=True,
dpi=200):
'''This generates a coverage map plot for all of the collections in
collection_dirlist.
Writes to outfile. This should probably go into the basedir docs/static
directory.
Gets the hulls from the catalog-footprint.pkl files in each collection's
directory.
'''
if isinstance(use_colorlist, (list, tuple)):
if len(use_colorlist) != len(collection_dirlist):
LOGERROR("the color list provided must have the same "
"length as the collection_dirlist")
return None
# label sizes
matplotlib.rcParams['xtick.labelsize'] = 16.0
matplotlib.rcParams['ytick.labelsize'] = 16.0
# fonts for the entire thing
matplotlib.rcParams['font.size'] = 16
# lines
matplotlib.rcParams['lines.linewidth'] = 2.0
# axes
matplotlib.rcParams['axes.linewidth'] = 2.0
matplotlib.rcParams['axes.labelsize'] = 14.0
# xtick setup
matplotlib.rcParams['xtick.major.size'] = 10.0
matplotlib.rcParams['xtick.minor.size'] = 5.0
matplotlib.rcParams['xtick.major.width'] = 1.0
matplotlib.rcParams['xtick.minor.width'] = 1.0
matplotlib.rcParams['xtick.major.pad'] = 8.0
# ytick setup
matplotlib.rcParams['ytick.major.size'] = 10.0
matplotlib.rcParams['ytick.minor.size'] = 5.0
matplotlib.rcParams['ytick.major.width'] = 1.0
matplotlib.rcParams['ytick.minor.width'] = 1.0
matplotlib.rcParams['ytick.major.pad'] = 8.0
# svg font setup
plt.rcParams['svg.fonttype'] = 'none'
fig = plt.figure(figsize=(14,12))
ax = fig.add_subplot(111, projection=use_projection)
ax.set_facecolor('#e2e3e5')
if show_galactic_plane:
LOGINFO('plotting the Galactic plane')
galactic_plane = SkyCoord(
np.arange(0,360.0,0.25),0.0,frame='galactic',unit='deg'
).icrs
galactic_plane_ra = np.array([x.ra.value for x in galactic_plane])
galactic_plane_decl = np.array([x.dec.value for x in galactic_plane])
galra = galactic_plane_ra[::]
galdec = galactic_plane_decl[::]
galra[galra > 180.0] = galra[galra > 180.0] - 360.0
if east_is_left:
galra = -galra
ax.scatter(
np.radians(galra),
np.radians(galdec),
s=25,
color='#ffc107',
marker='o',
zorder=-99,
label='Galactic plane',
rasterized=True
)
if show_ecliptic_plane:
LOGINFO('plotting the ecliptic plane')
# ecliptic plane
ecliptic_equator = SkyCoord(
np.arange(0,360.0,0.25),
0.0,
frame='geocentrictrueecliptic',unit='deg'
).icrs
ecliptic_equator_ra = np.array(
[x.ra.value for x in ecliptic_equator]
)
ecliptic_equator_decl = np.array(
[x.dec.value for x in ecliptic_equator]
)
eclra = ecliptic_equator_ra[::]
ecldec = ecliptic_equator_decl[::]
eclra[eclra > 180.0] = eclra[eclra > 180.0] - 360.0
if east_is_left:
eclra = -eclra
ax.scatter(
np.radians(eclra),
np.radians(ecldec),
s=25,
color='#6c757d',
marker='o',
zorder=-80,
label='Ecliptic plane',
rasterized=True
)
#
# now, we'll go through each collection
#
collection_labels = {}
for ci, cdir in enumerate(collection_dirlist):
LOGINFO('plotting footprint for collection: %s' % cdir.replace('-','_'))
footprint_pkl = os.path.join(cdir, 'catalog-footprint.pkl')
with open(footprint_pkl,'rb') as infd:
footprint = pickle.load(infd)
hull_boundary = footprint['%s_hull_boundary' % use_hull]
hull = footprint['%s_hull' % use_hull]
if isinstance(hull_boundary, np.ndarray):
covras = hull_boundary[:,0]
covdecls = hull_boundary[:,1]
# wrap the RAs
covras[covras > 180.0] = covras[covras > 180.0] - 360.0
if east_is_left:
covras = -covras
if isinstance(use_colorlist, (list, tuple)):
ax.fill(
| np.radians(covras) | numpy.radians |
import os
import argparse
from datetime import datetime
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
from time import sleep
import numpy as np
import torch
import torchvision
from torch.nn.parallel import DataParallel
from torch.nn import Conv2d
from torch.nn.init import kaiming_normal_
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import albumentations as A
from simclr.model import load_optimizer, save_model
from simclr.modules import SimCLR, NT_Xent, get_resnet
from simclr.modules.transformations import TransformsSimCLR
from simclr.modules.sync_batchnorm import convert_model
from simclr.utils import yaml_config_hook
from lunglens.core import *
from lunglens.loaders import *
def train(args, data_loader, model, criterion, optimizer, writer):
loss_epoch = 0
    # each super-batch contains N slices for each of M scans
for step, (superbatch0, superbatch1) in enumerate(tqdm(data_loader)):
num_scans, slices_per_scan = superbatch0.shape[:2]
        # TODO: check what the non_blocking param does and whether we need it
        # squash the per-scan batches into one joined batch
# [N, M, ...] -> [1, N * M, ...]
x_i = squash_scan_batches(superbatch0).to(args.device)
x_j = squash_scan_batches(superbatch1).to(args.device)
# compute encodings for joined batches
h_i, h_j, z_i, z_j = model(x_i, x_j)
if args.single_scan_loss:
# split joined batch into single batch per scan
# so we calculate loss looking at a single scan at a time
z_i = split_scans(z_i, num_scans, slices_per_scan)
z_j = split_scans(z_j, num_scans, slices_per_scan)
loss = 0
optimizer.zero_grad()
# enumerate batches in superbatches
#print(z_i.shape, z_j.shape)
if args.single_scan_loss:
for (z_i_batch, z_j_batch) in zip(z_i, z_j):
loss += criterion(z_i_batch, z_j_batch)
do_step = True
else:
# not single scan loss
if z_i.shape[0] == criterion.batch_size:
loss += criterion(z_i, z_j)
do_step = True
else:
do_step = False
# probably end of epoch, not enough data
if do_step:
loss.backward()
optimizer.step()
if args.nr == 0 and step > 0 and step % 20 == 0:
print(f"Step [{step}/{len(data_loader)}]\t Loss: {loss.item()}")
if args.nr == 0:
writer.add_scalar("Loss/train_epoch", loss.item(), args.global_step)
args.global_step += 1
loss_epoch += loss.item()
return loss_epoch
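
# Illustrative sketch (assumption, not the lunglens implementation): the
# squash_scan_batches / split_scans helpers used in train() above are assumed
# here to be simple reshapes between the per-scan layout
# [num_scans, slices_per_scan, ...] and the flat layout fed to the encoder.
def _squash_scan_batches_sketch(superbatch):
    # [N, M, C, H, W] -> [N * M, C, H, W]
    n, m = superbatch.shape[:2]
    return superbatch.reshape(n * m, *superbatch.shape[2:])

def _split_scans_sketch(z, num_scans, slices_per_scan):
    # [N * M, D] -> [N, M, D]: one batch of embeddings per scan
    return z.reshape(num_scans, slices_per_scan, -1)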
class NT_Xent_caclulator(NT_Xent):
def forward(self, z_i, z_j, visualize=False):
N = 2 * self.batch_size * self.world_size
z = torch.cat((z_i, z_j), dim=0)
if self.world_size > 1:
z = torch.cat(GatherLayer.apply(z), dim=0)
sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) / self.temperature
sim_i_j = torch.diag(sim, self.batch_size * self.world_size)
sim_j_i = torch.diag(sim, -self.batch_size * self.world_size)
# We have 2N samples, but with Distributed training every GPU gets N examples too, resulting in: 2xNxN
positive_samples = torch.cat((sim_i_j, sim_j_i), dim=0).reshape(
N, 1
)
negative_samples = sim[self.mask].reshape(N, -1)
labels = torch.zeros(N).to(positive_samples.device).long()
logits = torch.cat((positive_samples, negative_samples), dim=1)
loss = self.criterion(logits, labels)
loss /= N
return loss, sim * self.temperature
def validate(data_loader, model, visualize=False):
for i, (superbatch0, superbatch1) in enumerate(data_loader):
x_i = squash_scan_batches(superbatch0).to(args.device)
x_j = squash_scan_batches(superbatch1).to(args.device)
h_i, h_j, z_i, z_j = model(x_i, x_j)
z_i, z_j = z_i.cpu().detach().numpy(), z_j.cpu().detach().numpy()
if i == 0:
z_i_vec, z_j_vec = np.copy(z_i), np.copy(z_j)
else:
z_i_vec = np.concatenate((z_i_vec, z_i))
z_j_vec = np.concatenate((z_j_vec, z_j))
#print(i, x_i.shape, x_j.shape, z_i.shape, z_j.shape, z_i_vec.shape, z_j_vec.shape)
num_pairs = z_i_vec.shape[0]
loss_calculator = NT_Xent_caclulator(batch_size=num_pairs, \
temperature=0.5, device='cpu', world_size=1)
loss, sim = loss_calculator(torch.from_numpy(z_i_vec), torch.from_numpy(z_j_vec))
#print('validation loss =', loss)
if visualize:
ur = sim[:num_pairs, num_pairs:]
bl = sim[num_pairs:, :num_pairs]
positives = np.concatenate((np.diag(ur), np.diag(bl))).flatten()
allpairs = np.concatenate((ur, bl)).flatten()
bins = | np.linspace(start=-1, stop=1, num=500) | numpy.linspace |
import math
import unittest
import numpy as np
from pydrake.common import FindResourceOrThrow
from pydrake.examples.pendulum import PendulumPlant
from pydrake.multibody.tree import MultibodyForces
from pydrake.multibody.plant import MultibodyPlant
from pydrake.multibody.parsing import Parser
from pydrake.systems.analysis import Simulator
from pydrake.systems.controllers import (
DiscreteTimeLinearQuadraticRegulator,
DynamicProgrammingOptions,
FiniteHorizonLinearQuadraticRegulator,
FiniteHorizonLinearQuadraticRegulatorOptions,
FiniteHorizonLinearQuadraticRegulatorResult,
FittedValueIteration,
InverseDynamicsController,
InverseDynamics,
LinearQuadraticRegulator,
LinearProgrammingApproximateDynamicProgramming,
MakeFiniteHorizonLinearQuadraticRegulator,
PeriodicBoundaryCondition,
PidControlledSystem,
PidController,
)
from pydrake.systems.framework import DiagramBuilder, InputPortSelection
from pydrake.systems.primitives import Integrator, LinearSystem
from pydrake.trajectories import Trajectory
class TestControllers(unittest.TestCase):
def test_fitted_value_iteration_pendulum(self):
plant = PendulumPlant()
simulator = Simulator(plant)
def quadratic_regulator_cost(context):
x = context.get_continuous_state_vector().CopyToVector()
x[0] = x[0] - math.pi
u = plant.EvalVectorInput(context, 0).CopyToVector()
return x.dot(x) + u.dot(u)
# Note: intentionally under-sampled to keep the problem small
qbins = np.linspace(0., 2.*math.pi, 11)
qdotbins = np.linspace(-10., 10., 11)
state_grid = [set(qbins), set(qdotbins)]
input_limit = 2.
input_mesh = [set(np.linspace(-input_limit, input_limit, 5))]
timestep = 0.01
num_callbacks = [0]
def callback(iteration, mesh, cost_to_go, policy):
# Drawing is slow, don't draw every frame.
num_callbacks[0] += 1
options = DynamicProgrammingOptions()
options.convergence_tol = 1.
options.periodic_boundary_conditions = [
PeriodicBoundaryCondition(0, 0., 2.*math.pi)
]
options.visualization_callback = callback
options.input_port_index = InputPortSelection.kUseFirstInputIfItExists
options.assume_non_continuous_states_are_fixed = False
policy, cost_to_go = FittedValueIteration(simulator,
quadratic_regulator_cost,
state_grid, input_mesh,
timestep, options)
self.assertGreater(num_callbacks[0], 0)
def test_linear_programming_approximate_dynamic_programming(self):
integrator = Integrator(1)
simulator = Simulator(integrator)
# minimum time cost function (1 for all non-zero states).
def cost_function(context):
x = context.get_continuous_state_vector().CopyToVector()
if (math.fabs(x[0]) > 0.1):
return 1.
else:
return 0.
def cost_to_go_function(state, parameters):
return parameters[0] * math.fabs(state[0])
state_samples = np.array([[-4., -3., -2., -1., 0., 1., 2., 3., 4.]])
input_samples = np.array([[-1., 0., 1.]])
timestep = 1.0
options = DynamicProgrammingOptions()
options.discount_factor = 1.
J = LinearProgrammingApproximateDynamicProgramming(
simulator, cost_function, cost_to_go_function, 1,
state_samples, input_samples, timestep, options)
self.assertAlmostEqual(J[0], 1., delta=1e-6)
def test_inverse_dynamics(self):
sdf_path = FindResourceOrThrow(
"drake/manipulation/models/"
"iiwa_description/sdf/iiwa14_no_collision.sdf")
plant = MultibodyPlant(time_step=0.01)
Parser(plant).AddModelFromFile(sdf_path)
plant.WeldFrames(plant.world_frame(),
plant.GetFrameByName("iiwa_link_0"))
plant.Finalize()
# Just test that the constructor doesn't throw.
controller = InverseDynamics(
plant=plant,
mode=InverseDynamics.InverseDynamicsMode.kGravityCompensation)
def test_inverse_dynamics_controller(self):
sdf_path = FindResourceOrThrow(
"drake/manipulation/models/"
"iiwa_description/sdf/iiwa14_no_collision.sdf")
plant = MultibodyPlant(time_step=0.01)
Parser(plant).AddModelFromFile(sdf_path)
plant.WeldFrames(plant.world_frame(),
plant.GetFrameByName("iiwa_link_0"))
plant.mutable_gravity_field().set_gravity_vector([0.0, 0.0, 0.0])
plant.Finalize()
# We verify the (known) size of the model.
kNumPositions = 7
kNumVelocities = 7
kNumActuators = 7
kStateSize = kNumPositions + kNumVelocities
self.assertEqual(plant.num_positions(), kNumPositions)
self.assertEqual(plant.num_velocities(), kNumVelocities)
self.assertEqual(plant.num_actuators(), kNumActuators)
kp = np.array([1., 2., 3., 4., 5., 6., 7.])
ki = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
kd = np.array([.5, 1., 1.5, 2., 2.5, 3., 3.5])
controller = InverseDynamicsController(robot=plant,
kp=kp,
ki=ki,
kd=kd,
has_reference_acceleration=True)
context = controller.CreateDefaultContext()
output = controller.AllocateOutput()
estimated_state_port = controller.get_input_port(0)
desired_state_port = controller.get_input_port(1)
desired_acceleration_port = controller.get_input_port(2)
control_port = controller.get_output_port(0)
self.assertEqual(desired_acceleration_port.size(), kNumVelocities)
self.assertEqual(estimated_state_port.size(), kStateSize)
self.assertEqual(desired_state_port.size(), kStateSize)
self.assertEqual(control_port.size(), kNumVelocities)
# Current state.
q = np.array([-0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3])
v = np.array([-0.9, -0.6, -0.3, 0.0, 0.3, 0.6, 0.9])
x = np.concatenate([q, v])
# Reference state and acceleration.
q_r = q + 0.1*np.ones_like(q)
v_r = v + 0.1*np.ones_like(v)
x_r = np.concatenate([q_r, v_r])
vd_r = np.array([1., 2., 3., 4., 5., 6., 7.])
integral_term = np.array([-1., -2., -3., -4., -5., -6., -7.])
vd_d = vd_r + kp*(q_r-q) + kd*(v_r-v) + ki*integral_term
estimated_state_port.FixValue(context, x)
desired_state_port.FixValue(context, x_r)
desired_acceleration_port.FixValue(context, vd_r)
controller.set_integral_value(context, integral_term)
# Set the plant's context.
plant_context = plant.CreateDefaultContext()
x_plant = plant.GetMutablePositionsAndVelocities(plant_context)
x_plant[:] = x
# Compute the expected value of the generalized forces using
# inverse dynamics.
tau_id = plant.CalcInverseDynamics(
plant_context, vd_d, MultibodyForces(plant))
# Verify the result.
controller.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(0).CopyToVector(),
tau_id))
def test_issue14355(self):
"""
DiagramBuilder.AddSystem() may not propagate keep alive relationships.
We use this test to show resolution at a known concrete point of
failure.
https://github.com/RobotLocomotion/drake/issues/14355
"""
def make_diagram():
# Use a nested function to ensure that all locals get garbage
# collected quickly.
# Construct a trivial plant and ID controller.
# N.B. We explicitly do *not* add this plant to the diagram.
controller_plant = MultibodyPlant(time_step=0.002)
controller_plant.Finalize()
builder = DiagramBuilder()
controller = builder.AddSystem(
InverseDynamicsController(
controller_plant,
kp=[],
ki=[],
kd=[],
has_reference_acceleration=False,
)
)
# Forward ports for ease of testing.
builder.ExportInput(
controller.get_input_port_estimated_state(), "x_estimated")
builder.ExportInput(
controller.get_input_port_desired_state(), "x_desired")
builder.ExportOutput(controller.get_output_port_control(), "u")
diagram = builder.Build()
return diagram
diagram = make_diagram()
# N.B. Without the workaround for #14355, we get a segfault when
# creating the context.
context = diagram.CreateDefaultContext()
diagram.GetInputPort("x_estimated").FixValue(context, [])
diagram.GetInputPort("x_desired").FixValue(context, [])
u = diagram.GetOutputPort("u").Eval(context)
np.testing.assert_equal(u, [])
def test_pid_controlled_system(self):
controllers = [
PidControlledSystem(plant=PendulumPlant(), kp=1., ki=0.,
kd=2., state_output_port_index=0,
plant_input_port_index=0),
PidControlledSystem(plant=PendulumPlant(), kp=[0], ki=[1],
kd=[2], state_output_port_index=0,
plant_input_port_index=0),
PidControlledSystem(plant=PendulumPlant(),
feedback_selector=np.eye(2), kp=1.,
ki=0., kd=2.,
state_output_port_index=0,
plant_input_port_index=0),
PidControlledSystem(plant=PendulumPlant(),
feedback_selector=np.eye(2),
kp=[0], ki=[1], kd=[2],
state_output_port_index=0,
plant_input_port_index=0),
]
for controller in controllers:
self.assertIsNotNone(controller.get_control_input_port())
self.assertIsNotNone(controller.get_state_input_port())
self.assertIsNotNone(controller.get_state_output_port())
def test_pid_controller(self):
controllers = [
PidController(kp=np.ones(3), ki=np.zeros(3),
kd=[1, 2, 3]),
PidController(state_projection=np.ones((6, 4)),
kp=np.ones(3), ki=np.zeros(3),
kd=[1, 2, 3]),
PidController(state_projection=np.ones((6, 4)),
output_projection=np.ones((4, 3)),
kp=np.ones(3), ki=np.zeros(3),
kd=[1, 2, 3]),
]
for controller in controllers:
self.assertEqual(controller.num_input_ports(), 2)
self.assertEqual(len(controller.get_Kp_vector()), 3)
self.assertEqual(len(controller.get_Ki_vector()), 3)
self.assertEqual(len(controller.get_Kd_vector()), 3)
self.assertIsNotNone(controller.get_input_port_estimated_state())
self.assertIsNotNone(controller.get_input_port_desired_state())
self.assertIsNotNone(controller.get_output_port_control())
def test_linear_quadratic_regulator(self):
A = np.array([[0, 1], [0, 0]])
B = np.array([[0], [1]])
C = np.identity(2)
D = np.array([[0], [0]])
double_integrator = LinearSystem(A, B, C, D)
Q = np.identity(2)
R = np.identity(1)
K_expected = np.array([[1, math.sqrt(3.)]])
S_expected = np.array([[math.sqrt(3), 1.], [1., math.sqrt(3)]])
(K, S) = LinearQuadraticRegulator(A, B, Q, R)
np.testing.assert_almost_equal(K, K_expected)
np.testing.assert_almost_equal(S, S_expected)
controller = LinearQuadraticRegulator(double_integrator, Q, R)
np.testing.assert_almost_equal(controller.D(), -K_expected)
context = double_integrator.CreateDefaultContext()
double_integrator.get_input_port(0).FixValue(context, [0])
controller = LinearQuadraticRegulator(double_integrator, context, Q, R)
np.testing.assert_almost_equal(controller.D(), -K_expected)
def test_discrete_time_linear_quadratic_regulator(self):
A = np.array([[1, 1], [0, 1]])
B = | np.array([[0], [1]]) | numpy.array |
import numpy as np
from psychopy import visual,core,monitors,event,gui, data
import os
import sys
import time
import wx
# #################
# Store info about the experiment session
expName = u'WCST' # from the Builder filename that created this script
task = ''
expInfo = {u'session': u'01', u'Participant ID': u'9999999'}
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
if len(sys.argv) > 1:
#tempFile.write("Entered if clause\n")
#tempFile.write('%s\n'%(sys.argv[2]))
expInfo['Participant ID'] = sys.argv[1]
#tempFile.write('%s\n'%(sys.argv[1]))
#tempFile.write('%s\n'%(sys.argv[2]))
PartDataFolder = sys.argv[2]
Tag = '1'
else:
dlg = gui.DlgFromDict(dictionary=expInfo)
if dlg.OK == False:
core.quit() # user pressed cancel
DataFolder = "../../data"
PartDataFolder = 'unorganized'
OutDir = os.path.join(DataFolder, PartDataFolder)
if not os.path.exists(OutDir):
os.mkdir(OutDir)
Tag = '1'
PartDataFolder = OutDir
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = os.path.join(PartDataFolder, '%s_%s_%s_%s_%s.csv' % (expInfo['Participant ID'],expName, task, Tag, expInfo['date']))
print(filename)
FontSize = 40
DisplayChoiceTime = 0.25 # Was 1
FeedbackDisplayTime = 1.5 # Was 2
InstructTime = 2
# #################
# Load up the Card Order
FileName = '../WCST/CardOrder.csv'
CardOrder = np.genfromtxt(FileName, delimiter=',')
INSTRUCTIONS = ('Select one of the four cards displayed at the top of the screen such '
+'that the selected card matches the card displayed at the bottom of the screen. '
+'The cards can be matched based on three dimensions - color, number of objects '
                +'or the shape of the objects they display. You will be given feedback on whether the '
                +'selected card was RIGHT or WRONG. Use the feedback to determine which '
                +'dimension the feedback is targeting, and select the matching card accordingly. The '
+'targeted dimension may change from time to time without notice.\n\nPress any key to begin')
X = 0
Y = 1
def pointInTriangle(t1,t2,t3,pt):
""" determines whether point PT is located inside
triangle with vertices at points T1, T2 and T3
"""
def sign(p1,p2,p3):
return (p1[X]-p3[X])*(p2[Y]-p3[Y])-(p2[X]-p3[X])*(p1[Y]-p3[Y])
b1=sign(pt,t1,t2)<0
b2=sign(pt,t2,t3)<0
b3=sign(pt,t3,t1)<0
return b1==b2 and b2==b3
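
# Illustrative sanity check (added for clarity, not part of the original
# experiment script): the centroid of the triangle (0,0)-(10,0)-(0,10) lies
# inside it, while a distant point does not.
assert pointInTriangle((0, 0), (10, 0), (0, 10), (3, 3))
assert not pointInTriangle((0, 0), (10, 0), (0, 10), (20, 20))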
def drawTriangle(M,t,value=1):
for i in range(M.shape[0]):
for j in range(M.shape[1]):
if pointInTriangle(t[0],t[1],t[2],(i,j)):
M[i,j]=value
return M
def drawCircle(M,pos,radius,value=1):
for i in range(M.shape[0]):
for j in range(M.shape[1]):
if np.sqrt((i-pos[X])**2+(j-pos[Y])**2)<radius:
M[i,j]=value
return M
def drawStar(M,pos,nv, ocr,icr):
""" pos - location of the star
nv - number of vertices
ocr - radius of outer circle
icr - radius of inner circle
"""
M=drawCircle(M,pos,icr)
phi= | np.linspace(0,2*np.pi,nv+1) | numpy.linspace |
import os
import time
import logging
import warnings
from pathlib import Path
from optparse import OptionParser
from multiprocessing import cpu_count
import numpy as np
import pandas as pd
from scipy.special import softmax
from PIL import Image
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from efficientnet_pytorch import EfficientNet
from utils import accuracy
from models import *
from dataset import UnlabelledDataset, CsvDataset
device = 'cuda' if torch.cuda.is_available() else 'cpu'
image_size = (512, 512)
def parse_args():
parser = OptionParser()
parser.add_option('-j', '--workers', dest='workers', default=cpu_count(), type='int',
help='number of data loading workers (default: n_cpus)')
parser.add_option('-b', '--batch-size', dest='batch_size', default=32, type='int',
help='batch size (default: 32)')
parser.add_option('--fn', '--feature-net', dest='feature_net_name', default='efficientnetb3',
help='Name of base model. Accepted values are inception/resnet152cbam/efficientnetb3 (default: efficientnetb3)')
parser.add_option('--gpu', '--gpu-ids', dest='gpu_ids', default='0',
help='IDs of gpu(s) to use in inference, multiple gpus should be seperated with commas')
parser.add_option('--de', '--do-eval', dest='do_eval', default=True,
help='If labels are provided, set True to evaluate metrics (default: True)')
parser.add_option('--csv', '--csv-labels-path', dest='csv_labels_path', default='folder',
help='If eval mode is set, set to "folder" to read labels from folders \
with classnames. Set to csv path to read labels from csv (default: folder)')
parser.add_option('--csv-headings', dest='csv_headings', default='image,label',
help='heading of image filepath and label column in csv')
parser.add_option('--dd', '--data-dir', dest='data_dir', default='data',
help='directory to images to run evaluation/ prediction')
parser.add_option('--cp', '--ckpt-path', dest='ckpt_path', default='./checkpoints/034.ckpt',
help='Path to saved model checkpoint (default: ./checkpoints/034.ckpt)')
parser.add_option('--od', '--output-dir', dest='output_dir', default='./output',
help='saving directory of extracted class probabilities csv file')
(options, args) = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = options.gpu_ids
return options, args
def main():
logging.basicConfig(
format='%(asctime)s: %(levelname)s: [%(filename)s:%(lineno)d]: %(message)s', level=logging.INFO)
warnings.filterwarnings("ignore")
options, args = parse_args()
predict_class_probabilities(options)
def prepare_dataloader(options):
"""Loads data from folder containing labelled folders of images/ csv containing filepaths and labels 0-195/
Unlabelled folder of images
"""
preprocess = transforms.Compose([
transforms.Resize(size=(image_size[0], image_size[1]), interpolation=Image.LANCZOS),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
if options.do_eval:
if options.csv_labels_path == 'folder':
dataset = ImageFolder(str(options.data_dir), transform=preprocess)
else:
dataset = CsvDataset(str(options.data_dir), options.csv_labels_path,
options.csv_headings, transform=preprocess)
image_list = [sample[0] for sample in dataset.samples]
else:
# returns image without label
dataset = UnlabelledDataset(str(options.data_dir), transform=preprocess, shape=image_size)
image_list = dataset.image_list
data_loader = DataLoader(dataset, batch_size=options.batch_size, shuffle=False,
num_workers=options.workers, pin_memory=True)
logging.info(f'Extract Probabilities: Batch size: {options.batch_size}, Dataset size: {len(dataset)}')
return dataset, data_loader, image_list
def predict_class_probabilities(options):
"""Predicts class probabilities and optionally evaluates accuracy, precision,
recall and f1 score if labels are provided
Args:
options: parsed arguments
"""
# Initialize model
num_classes = 196
num_attentions = 64
if options.feature_net_name == 'resnet152cbam':
feature_net = resnet152_cbam(pretrained=True)
elif options.feature_net_name == 'efficientnetb3':
feature_net = EfficientNet.from_pretrained('efficientnet-b3')
elif options.feature_net_name == 'inception':
feature_net = inception_v3(pretrained=True)
else:
raise RuntimeError('Invalid model name')
net = WSDAN(num_classes=num_classes, M=num_attentions, net=feature_net)
# Load ckpt and get state_dict
checkpoint = torch.load(options.ckpt_path)
state_dict = checkpoint['state_dict']
# Load weights
net.load_state_dict(state_dict)
logging.info('Network loaded from {}'.format(options.ckpt_path))
# load feature center
feature_center = checkpoint['feature_center'].to(torch.device(device))
logging.info('feature_center loaded from {}'.format(options.ckpt_path))
# Use cuda
cudnn.benchmark = True
net.to(torch.device(device))
net = nn.DataParallel(net)
# Load dataset
dataset, data_loader, image_list = prepare_dataloader(options)
# Default Parameters
theta_c = 0.5
crop_size = image_size # size of cropped images for 'See Better'
# metrics initialization
batches = 0
epoch_loss = 0
epoch_acc = np.array([0, 0, 0], dtype='float') # top - 1, 3, 5
loss = nn.CrossEntropyLoss()
start_time = time.time()
net.eval()
y_pred_average = np.zeros((len(dataset), 196))
with torch.no_grad():
for i, sample in enumerate(tqdm(data_loader)):
if options.do_eval:
X, y = sample
y = y.to(torch.device(device))
else:
X = sample
X = X.to(torch.device(device))
# Raw Image
y_pred_raw, feature_matrix, attention_map = net(X)
# Object Localization and Refinement
crop_mask = F.upsample_bilinear(
attention_map, size=(X.size(2), X.size(3))) > theta_c
crop_images = []
for batch_index in range(crop_mask.size(0)):
nonzero_indices = torch.nonzero(crop_mask[batch_index, 0, ...])
height_min = nonzero_indices[:, 0].min()
height_max = nonzero_indices[:, 0].max()
width_min = nonzero_indices[:, 1].min()
width_max = nonzero_indices[:, 1].max()
crop_images.append(F.upsample_bilinear(
X[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max], size=crop_size))
crop_images = torch.cat(crop_images, dim=0)
y_pred_crop, _, _ = net(crop_images)
y_pred = (y_pred_raw + y_pred_crop) / 2
y_pred_average[i*options.batch_size:(i+1)*options.batch_size] = y_pred.cpu().numpy()
batches += 1
if options.do_eval:
# loss
batch_loss = loss(y_pred, y)
epoch_loss += batch_loss.item()
# metrics: top-1, top-3, top-5 error
epoch_acc += accuracy(y_pred, y, topk=(1, 3, 5))
end_time = time.time()
if options.do_eval:
epoch_loss /= batches
epoch_acc /= batches
logging.info('Valid: Loss %.5f, Accuracy: Top-1 %.4f, Top-3 %.4f, Top-5 %.4f, Time %3.2f' %
(epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2], end_time - start_time))
ground_truth = [sample[1] for sample in dataset.samples]
precision, recall, f1, _ = precision_recall_fscore_support(ground_truth, | np.argmax(y_pred_average, axis=1) | numpy.argmax |
import numpy as np
from numba import vectorize
# Size is a list describing the network structure, e.g. [3,5,5,4,2]: 3 is the number of input-layer neurons, the middle entries are the hidden-layer sizes, and 2 is the number of output-layer neurons
class nn_Creat():
def __init__(self,Size,active_fun='sigmoid',learning_rate=1.5,batch_normalization=1,objective_fun='MSE',
output_function='sigmoid',optimization_method='normal',weight_decay=0):
        self.Size=Size # initialize the network parameters and print them
print('the structure of the NN is \n', self.Size)
self.active_fun=active_fun
print('active function is %s '% active_fun)
self.learning_rate=learning_rate
print('learning_rate is %s '% learning_rate)
self.batch_normalization=batch_normalization
print('batch_normalization is %d '% batch_normalization)
self.objective_fun=objective_fun
print('objective_function is %s '% objective_fun)
self.optimization_method=optimization_method
print('optimization_method is %s '% optimization_method)
self.weight_decay = weight_decay
print('weight_decay is %f '% weight_decay)
        # initialize the network weights and gradients
self.vecNum=0
self.depth=len(Size)
self.W=[]
self.b=[]
self.W_grad=[]
self.b_grad=[]
self.cost=[]
        if self.batch_normalization: # if batch normalization is used, introduce the mean E and variance S plus the scale/shift factors Gamma and Beta
self.E = []
self.S = []
self.Gamma = []
self.Beta = []
        if objective_fun=='Cross Entropy': # check whether the objective function is cross-entropy
self.output_function='softmax'
else:
self.output_function='sigmoid'
print('output_function is %s \n'% self.output_function)
print('Start training NN \n')
for item in range(self.depth-1):
width=self.Size[item]
height=self.Size[item+1]
            q=2*np.random.rand(height,width)/np.sqrt(width)-1/np.sqrt(width) # initialize the weight matrix W
self.W.append(q)
            if self.active_fun=='relu': # if the activation is relu, initialize the biases accordingly
self.b.append(np.random.rand(height,1)+0.01)
else:
self.b.append(2*np.random.rand(height,1)/np.sqrt(width)-1/np.sqrt(width))
            if self.optimization_method=='Momentum': # momentum update, i.e. an accumulation of previous gradients
if item!=0:
self.vW.append(np.zeros([height,width]))
self.vb.append(np.zeros([height, 1]))
else:
self.vW=[]
self.vb=[]
self.vW.append(np.zeros([height, width]))
self.vb.append(np.zeros([height, 1]))
            if self.optimization_method=='AdaGrad' or optimization_method=='RMSProp' or optimization_method=='Adam': # whether one of these adaptive-gradient optimizers is used
if item!=0:
self.rW.append(np.zeros([height,width]))
self.rb.append(np.zeros([height, 1]))
else:
self.rW=[]
self.rb=[]
self.rW.append(np.zeros([height, width]))
self.rb.append(np.zeros([height, 1]))
            if self.optimization_method == 'Adam': # whether the optimizer is Adam
if item!=0:
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
else:
self.sW = []
self.sb = []
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
            if self.batch_normalization: # whether to normalize each layer
self.Gamma.append(np.array([1]))
self.Beta.append(np.array([0]))
self.E.append(np.zeros([height,1]))
self.S.append(np.zeros([height,1]))
                if self.optimization_method=='Momentum': # whether Momentum is also used together with batch normalization
if item!=0:
self.vGamma.append(np.array([1]))
self.vBeta.append(np.array([0]))
else:
self.vGamma = []
self.vBeta = []
self.vGamma.append(np.array([1]))
self.vBeta.append(np.array([0]))
                if self.optimization_method == 'AdaGrad' or optimization_method == 'RMSProp' or optimization_method == 'Adam': # whether one of these adaptive optimizers is also used together with batch normalization
if item!=0:
self.rGamma.append(np.array([0]))
self.rBeta.append(np.array([0]))
else:
self.rGamma = []
self.rBeta = []
self.rGamma.append( | np.array([0]) | numpy.array |
r'''
Chord estimation algorithms produce a list of intervals and labels which denote
the chord being played over each timespan. They are evaluated by comparing the
estimated chord labels to some reference, usually using a mapping to a chord
subalphabet (e.g. minor and major chords only, all triads, etc.). There is no
single 'right' way to compare two sequences of chord labels. Embracing this
reality, every conventional comparison rule is provided. Comparisons are made
over the different components of each chord (e.g. G:maj(6)/5): the root (G),
the root-invariant active semitones as determined by the quality
shorthand (maj) and scale degrees (6), and the bass interval (5).
This submodule provides functions both for comparing a sequences of chord
labels according to some chord subalphabet mapping and for using these
comparisons to score a sequence of estimated chords against a reference.
Conventions
-----------
A sequence of chord labels is represented as a list of strings, where each
label is the chord name based on the syntax of [#harte2010towards]_. Reference
and estimated chord label sequences should be of the same length for comparison
functions. When converting the chord string into its constituent parts,
* Pitch class counting starts at C, e.g. C:0, D:2, E:4, F:5, etc.
* Scale degree is represented as a string of the diatonic interval, relative to
the root note, e.g. 'b6', '#5', or '7'
* Bass intervals are represented as strings
* Chord bitmaps are positional binary vectors indicating active pitch classes
and may be absolute or relative depending on context in the code.
If no chord is present at a given point in time, it should have the label 'N',
which is defined in the variable ``mir_eval.chord.NO_CHORD``.
Metrics
-------
* :func:`mir_eval.chord.root`: Only compares the root of the chords.
* :func:`mir_eval.chord.majmin`: Only compares major, minor, and "no chord"
labels.
* :func:`mir_eval.chord.majmin_inv`: Compares major/minor chords, with
inversions. The bass note must exist in the triad.
* :func:`mir_eval.chord.mirex`: An estimated chord is considered correct if it
shares *at least* three pitch classes in common.
* :func:`mir_eval.chord.thirds`: Chords are compared at the level of major or
  minor thirds (root and third). For example, both ('A:7', 'A:maj') and
('A:min', 'A:dim') are equivalent, as the third is major and minor in
quality, respectively.
* :func:`mir_eval.chord.thirds_inv`: Same as above, with inversions (bass
relationships).
* :func:`mir_eval.chord.triads`: Chords are considered at the level of triads
(major, minor, augmented, diminished, suspended), meaning that, in addition
to the root, the quality is only considered through #5th scale degree (for
augmented chords). For example, ('A:7', 'A:maj') are equivalent, while
('A:min', 'A:dim') and ('A:aug', 'A:maj') are not.
* :func:`mir_eval.chord.triads_inv`: Same as above, with inversions (bass
relationships).
* :func:`mir_eval.chord.tetrads`: Chords are considered at the level of the
entire quality in closed voicing, i.e. spanning only a single octave;
extended chords (9's, 11's and 13's) are rolled into a single octave with any
upper voices included as extensions. For example, ('A:7', 'A:9') are
  equivalent, but ('A:7', 'A:maj7') are not.
* :func:`mir_eval.chord.tetrads_inv`: Same as above, with inversions (bass
relationships).
* :func:`mir_eval.chord.sevenths`: Compares according to MIREX "sevenths"
rules; that is, only major, major seventh, seventh, minor, minor seventh and
no chord labels are compared.
* :func:`mir_eval.chord.sevenths_inv`: Same as above, with inversions (bass
relationships).
* :func:`mir_eval.chord.overseg`: Computes the level of over-segmentation
between estimated and reference intervals.
* :func:`mir_eval.chord.underseg`: Computes the level of under-segmentation
between estimated and reference intervals.
* :func:`mir_eval.chord.seg`: Computes the minimum of over- and
under-segmentation between estimated and reference intervals.
References
----------
.. [#harte2010towards] <NAME>. Towards Automatic Extraction of Harmony
Information from Music Signals. PhD thesis, Queen Mary University of
London, August 2010.
'''
import numpy as np
import warnings
import collections
import re
from mir_eval import util
BITMAP_LENGTH = 12
NO_CHORD = "N"
NO_CHORD_ENCODED = -1, np.array([0]*BITMAP_LENGTH), -1
X_CHORD = "X"
X_CHORD_ENCODED = -1, | np.array([-1]*BITMAP_LENGTH) | numpy.array |
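
# Worked example of the label conventions described in the module docstring
# (illustrative addition, not part of the original module): 'G:maj(6)/5' has
# root G (pitch class 7), quality shorthand 'maj' (root-relative semitones
# 0, 4, 7), an added scale degree '6' (root-relative semitone 9), and bass
# interval '5' (7 semitones above the root). Its root-relative bitmap is:
_EXAMPLE_G_MAJ6_OVER_5_BITMAP = np.zeros(BITMAP_LENGTH, dtype=int)
_EXAMPLE_G_MAJ6_OVER_5_BITMAP[[0, 4, 7, 9]] = 1  # root, major third, fifth, added sixth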