| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
from __future__ import print_function
import pandas as pd
import numpy as np
from myo.utils import TimeInterval
import myo
import sys
from threading import Lock, Thread
from matplotlib import pyplot as plt
from collections import deque
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
from scipy.spatial.distance import pdist, squareform #scipy spatial distance
import sklearn as sk
import sklearn.metrics.pairwise
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, LeakyReLU
from keras import metrics
from keras import backend as K
import time
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from keras.utils import np_utils
#Recurrence plot function
def recurrence_plot(s, eps=None, steps=None):
    if eps is None: eps = 0.1
    if steps is None: steps = 10
    d = sk.metrics.pairwise.pairwise_distances(s)
    d = np.floor(d / eps)
    d[d > steps] = steps
    #Z = squareform(d)
    return d
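# Usage sketch (hypothetical data): pairwise_distances expects a 2-D array, so a
# 1-D signal is reshaped to (n_samples, 1); the result is a distance matrix
# quantized into `steps` levels, viewable with plt.imshow(_demo_rp, cmap='binary').
_demo_rp = recurrence_plot(np.sin(np.linspace(0, 4 * np.pi, 200)).reshape(-1, 1))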
#Initial definitions
samples = 100
columns = samples + 1
rows = 8
totalSamples = samples * rows
totalColumns = totalSamples + 1
dimensions = (rows, columns)
dimensions2 = (rows, columns - 1)
signal_header = np.zeros(totalColumns, dtype='object')
#fill the signal header with column names
for i in range(0, totalColumns):
    if i == totalColumns - 1:
        signal_header[i] = "gesture"
    else:
        signal_header[i] = "sample_" + str(i)
data = []
#Receives the signal from the EMG and keeps the latest 100 samples from each plate in a queue
class EmgCollector(myo.DeviceListener):
"""
Collects EMG data in a queue with *n* maximum number of elements.
"""
def __init__(self, n):
self.n = n
self.lock = Lock()
self.emg_data_queue = deque(maxlen=n)
def get_emg_data(self):
with self.lock:
return list(self.emg_data_queue)
# myo.DeviceListener
def on_connected(self, event):
event.device.stream_emg(True)
def on_emg(self, event):
with self.lock:
self.emg_data_queue.append((event.timestamp, event.emg))
class Plot(object):
def __init__(self, listener):
self.n = listener.n
self.listener = listener
# self.fig = plt.figure()
# self.axes = [self.fig.add_subplot('81' + str(i)) for i in range(1, 9)]
# [(ax.set_ylim([-100, 100])) for ax in self.axes]
# self.graphs = [ax.plot(np.arange(self.n), np.zeros(self.n))[0] for ax in self.axes]
# plt.ion()
def update_plot(self):
emg_data = self.listener.get_emg_data()
emg_data = np.array([x[1] for x in emg_data])
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
data=np.genfromtxt(path,delimiter=",",skip_header=1)
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
new_record=np.asarray(new_record)
census=np.concatenate((data,new_record),axis=0)
# --------------
#Code starts here
age=census[:,:1]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.mean(age)
age_std=np.std(age)
# --------------
#Code starts here
race=census[:,2]
race=np.asarray(race)
race_0=census[np.all([race==0],axis=0)]
race_1=census[np.all([race==1],axis=0)]
import numpy as np
import scipy.special as special
def loopbrz( Ra, I0, Nturns, R, Z ):
# Input
# Ra [m] Loop radius
# I0 [A] Loop current
# Nturns Loop number of turns (windings)
# R [m] Radial coordinate of the point
# Z [m] Axial coordinate of the point
# Output
# Br, Bz [T] Radial and Axial components of B-field at (R,Z)
#
# (Note that singularities are not handled here)
mu0 = 4.0e-7 * np.pi
B0 = mu0/2.0/Ra * I0 * Nturns
alfa = np.absolute(R)/Ra
beta = Z/Ra
gamma = (Z+1.0e-10)/(R+1.0e-10)
Q = (1+alfa)**2 + beta**2
ksq = 4.0 * alfa / Q
asq = alfa * alfa
bsq = beta * beta
Qsp = 1.0/np.pi/np.sqrt(Q)
K = special.ellipk(ksq)
E = special.ellipe(ksq)
Br = gamma * B0*Qsp * ( E * (1+asq+bsq)/(Q-4.0*alfa) - K )
Bz = B0*Qsp * ( E * (1-asq-bsq)/(Q-4.0*alfa) + K )
return Br, Bz
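# Usage sketch (hypothetical values): B-field of a 10-turn, 0.5 m radius loop
# carrying 100 A, evaluated 5 cm off-axis and 10 cm above the loop plane.
Br_demo, Bz_demo = loopbrz(Ra=0.5, I0=100.0, Nturns=10, R=0.05, Z=0.10)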
def roto(EulerAngles):
# Classic (proper) Euler Angles (p,t,f)
# with Z-X-Z rotation sequence:
# (psi,z), (theta,x), (phi,z)
# p=psi, t=theta, f=phi angles in [rad]
p=EulerAngles[0]
t=EulerAngles[1]
f=EulerAngles[2]
sp=np.sin(p)
st=np.sin(t)
    sf = np.sin(f)
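    # (hypothetical completion -- the snippet is truncated here; this builds the
    # standard proper-Euler Z-X-Z rotation matrix R = Rz(p) . Rx(t) . Rz(f)
    # from the sines above and the matching cosines)
    cp = np.cos(p)
    ct = np.cos(t)
    cf = np.cos(f)
    R = np.array([[cp * cf - sp * ct * sf, -cp * sf - sp * ct * cf,  sp * st],
                  [sp * cf + cp * ct * sf, -sp * sf + cp * ct * cf, -cp * st],
                  [st * sf,                 st * cf,                 ct]])
    return R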
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 14:35:26 2020
@author: cheritie
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as sp
from AO_modules.calibration.InteractionMatrix import interactionMatrix
from AO_modules.MisRegistration import MisRegistration
from AO_modules.mis_registration_identification_algorithm.computeMetaSensitivyMatrix import computeMetaSensitivityMatrix
from AO_modules.mis_registration_identification_algorithm.applyMisRegistration import applyMisRegistration, apply_shift_wfs
from AO_modules.tools.interpolateGeometricalTransformation import rotateImageMatrix,rotation,translationImageMatrix,translation,anamorphosis,anamorphosisImageMatrix
import skimage.transform as sk
"""
def estimateMisRegistration(nameFolder, nameSystem, tel, atm, ngs, dm_0, wfs, basis, calib_in, misRegistrationZeroPoint, epsilonMisRegistration, param, precision = 3, gainEstimation = 1, return_all = False):
Compute the set of sensitivity matrices required to identify the mis-registrations.
%%%%%%%%%%%%%%%% -- INPUTS -- %%%%%%%%%%%%%%%%
_ nameFolder : folder to store the sensitivity matrices.
_ nameSystem : name of the AO system considered. For instance 'ELT_96x96_R_band'
_ tel : telescope object
_ atm : atmosphere object
_ ngs : source object
_ dm_0 : deformable mirror with reference configuration of mis-registrations
_ pitch : pitch of the dm in [m]
_ wfs : wfs object
_ basis : basis to use to compute the sensitivity matrices. Basis should be an object with the following fields:
basis.modes : [nActuator x nModes] matrix containing the commands to apply the modal basis on the dm
basis.indexModes : indexes of the modes considered in the basis. This is used to name the sensitivity matrices
basis.extra : extra name to name the sensitivity matrices for instance 'KL'
_ precision : precision to round the parameter estimation. Equivalent to np.round(misReg_estimation, precision)
_ gainEstimation : gain to apply after each estimation; values below 1 can help avoid overshoot.
_ return_all : if true, returns all the estimations at every step of the algorithm
_ misRegistrationZeroPoint : mis-registration around which you want to compute the sensitivity matrices
_ epsilonMisRegistration : epsilon value to apply
_ param : dictionary used as parameter file
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The function returns the meta-sensitivity matrix that contains all the individual sensitivity matrices reshaped as a vector and concatenated.
%%%%%%%%%%%%%%%% -- OUTPUTS -- %%%%%%%%%%%%%%%%
_ misRegistration_out : mis-registration object corresponding to the convergence value
_ scalingFactor_values : scaling factor (for each mode of the modal basis) at each iteration, to account for possible gain variations between data and model
_ misRegistration_values : mis-registration values for each iteration
"""
def estimateMisRegistration(nameFolder, nameSystem, tel, atm, ngs, dm_0, wfs, basis, calib_in, misRegistrationZeroPoint, epsilonMisRegistration, param, precision = 3, gainEstimation = 1, sensitivity_matrices = None, return_all = False, fast = False, wfs_mis_registrated = None, nIteration = 3):
#%% ---------- LOAD/COMPUTE SENSITIVITY MATRICES --------------------
# compute the sensitivity matrices. If the data already exist, the files will be loaded instead.
# WARNING: the data are loaded only if the names of the requested files match the ones passed to this function.
# Make sure that these files correspond to the system you are working with.
if sensitivity_matrices is None:
[metaMatrix,calib_0] = computeMetaSensitivityMatrix(nameFolder = nameFolder,\
nameSystem = nameSystem,\
tel = tel,\
atm = atm,\
ngs = ngs,\
dm_0 = dm_0,\
pitch = dm_0.pitch,\
wfs = wfs,\
basis = basis,\
misRegistrationZeroPoint = misRegistrationZeroPoint,\
epsilonMisRegistration = epsilonMisRegistration,\
param = param,\
wfs_mis_registrated = wfs_mis_registrated)
else:
metaMatrix = sensitivity_matrices
#%% ---------- ITERATIVE ESTIMATION OF THE PARAMETERS --------------------
stroke = 1e-12
criteria = 0
n_mis_reg = metaMatrix.M.shape[0]
misRegEstBuffer = np.zeros(n_mis_reg)
scalingFactor_values = [1]
misRegistration_values = [np.zeros(n_mis_reg)]
epsilonMisRegistration_field = ['shiftX','shiftY','rotationAngle','radialScaling','tangentialScaling']
i=0
tel.isPaired = False
misRegistration_out = MisRegistration(misRegistrationZeroPoint)
if fast:
from AO_modules.calibration.InteractionMatrix import interactionMatrixFromPhaseScreen
dm_0.coefs = np.squeeze(basis.modes)
tel*dm_0
input_modes_0 = dm_0.OPD
input_modes_cp = input_modes_0.copy()
while criteria ==0:
i=i+1
# temporary deformable mirror
if np.ndim(input_modes_0)==2:
if wfs_mis_registrated is not None:
misRegistration_wfs = MisRegistration()
misRegistration_wfs.shiftX = misRegistration_out.shiftX
misRegistration_wfs.shiftY = misRegistration_out.shiftY
misRegistration_dm = MisRegistration()
misRegistration_dm.rotationAngle = misRegistration_out.rotationAngle
apply_shift_wfs(wfs, misRegistration_wfs.shiftX / (wfs.nSubap/wfs.telescope.D), misRegistration_wfs.shiftY/ (wfs.nSubap/wfs.telescope.D))
input_modes_cp = tel.pupil*apply_mis_reg(tel,input_modes_0, misRegistration_dm)
else:
input_modes_cp = tel.pupil*apply_mis_reg(tel,input_modes_0, misRegistration_out)
else:
for i_modes in range(input_modes_0.shape[2]):
if wfs_mis_registrated is not None:
misRegistration_wfs = MisRegistration()
misRegistration_wfs.shiftX = misRegistration_out.shiftX
misRegistration_wfs.shiftY = misRegistration_out.shiftY
misRegistration_dm = MisRegistration()
misRegistration_dm.rotationAngle = misRegistration_out.rotationAngle
apply_shift_wfs(wfs, misRegistration_wfs.shiftX / (wfs.nSubap/wfs.telescope.D), misRegistration_wfs.shiftY/ (wfs.nSubap/wfs.telescope.D))
input_modes_cp[:,:,i_modes] = tel.pupil*apply_mis_reg(tel,input_modes_0[:,:,i_modes], misRegistration_dm)
else:
input_modes_cp[:,:,i_modes] = tel.pupil*apply_mis_reg(tel,input_modes_0[:,:,i_modes], misRegistration_out)
# temporary interaction matrix
calib_tmp = interactionMatrixFromPhaseScreen(ngs,atm,tel,wfs,input_modes_cp,stroke,phaseOffset=0,nMeasurements=50,invert=False,print_time=False)
# temporary scaling factor
try:
scalingFactor_tmp = np.round(np.diag(calib_tmp.D.T@calib_in.D)/ np.diag(calib_tmp.D.T@calib_tmp.D),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.reshape( [email protected](1/scalingFactor_tmp) - calib_tmp.D ,calib_in.D.shape[0]*calib_in.D.shape[1]))
except:
scalingFactor_tmp = np.round(np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_in.D))/ np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_tmp.D)),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.squeeze((np.squeeze(calib_in.D)*(1/scalingFactor_tmp)) - np.squeeze(calib_tmp.D)))
# cumulative mis-registration
misRegEstBuffer+= np.round(misReg_tmp,precision)
# define the next working point to adjust the scaling factor
for i_mis_reg in range(n_mis_reg):
setattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg], getattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg]) + np.round(misReg_tmp[i_mis_reg],precision))
# save the data for each iteration
scalingFactor_values.append(np.copy(scalingFactor_tmp))
misRegistration_values.append(np.copy(misRegEstBuffer))
if i==nIteration:
criteria =1
else:
while criteria ==0:
i=i+1
# temporary deformable mirror
dm_tmp = applyMisRegistration(tel,misRegistration_out,param, wfs = wfs_mis_registrated,print_dm_properties=False,floating_precision=dm_0.floating_precision)
# temporary interaction matrix
calib_tmp = interactionMatrix(ngs,atm,tel,dm_tmp,wfs,basis.modes,stroke,phaseOffset=0,nMeasurements=50,invert=False,print_time=False)
# erase dm_tmp to free memory
del dm_tmp
# temporary scaling factor
try:
scalingFactor_tmp = np.round(np.diag(calib_tmp.D.T@calib_in.D)/ np.diag(calib_tmp.D.T@calib_tmp.D),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.reshape( [email protected](1/scalingFactor_tmp) - calib_tmp.D ,calib_in.D.shape[0]*calib_in.D.shape[1]))
except:
scalingFactor_tmp = np.round(np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_in.D))/ np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_tmp.D)),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.squeeze((np.squeeze(calib_in.D)*(1/scalingFactor_tmp)) - np.squeeze(calib_tmp.D)))
# cumulative mis-registration
misRegEstBuffer+= np.round(misReg_tmp,precision)
# define the next working point to adjust the scaling factor
for i_mis_reg in range(n_mis_reg):
setattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg], getattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg]) + np.round(misReg_tmp[i_mis_reg],precision))
# save the data for each iteration
scalingFactor_values.append(np.copy(scalingFactor_tmp))
misRegistration_values.append(np.copy(misRegEstBuffer))
if i==nIteration:
criteria =1
misRegistration_out.shiftX = np.round(misRegistration_out.shiftX,precision)
misRegistration_out.shiftY = np.round(misRegistration_out.shiftY,precision)
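    # (hypothetical completion -- the snippet is truncated here; per the docstring,
    # the function returns the converged mis-registration and, when return_all is
    # True, also the per-iteration scaling factors and estimates)
    if return_all:
        return misRegistration_out, scalingFactor_values, misRegistration_values
    else:
        return misRegistration_out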
import pandas as pd
import numpy as np
from collections import defaultdict
from utils.save_models import Save
from utils.functions import mean, total
class PrsScreeningNoMRI(Save):
"""Build cohort of risk-tailored screening using biopsy-first approach
to diagnosis, following pre-2019 NICE guidelines."""
def __init__(self, params, a_risk, od_by_risk:bool=False):
self.cohort_list = []
self.outcomes_list = []
self.simulations = defaultdict(list)
self.run_model = self._run_model(params, a_risk, od_by_risk)
def _run_model(self, params, a_risk, od_by_risk):
# Loop through age cohorts
for year in np.arange(55, 70):
pca_incidence_prs = params.pca_incidence.copy()
pca_incidence_prs[:,10:25] = (pca_incidence_prs[:,10:25].T * params.rr_incidence[year-45,:]).T
pca_incidence_prs[:,25:35] = pca_incidence_prs[:,25:35] * np.linspace(params.post_sc_incidence_drop,1,10)
pca_incidence_prs = pca_incidence_prs[:, year-45:]
# Death from PCa
pca_mortality_prs = params.pca_death_baseline.copy()
pca_mortality_prs[:,10:15] = pca_mortality_prs[:,10:15] * np.linspace(1,0.8,5)
pca_mortality_prs[:,15:] = pca_mortality_prs[:,15:] * params.rr_death_screening[:,15:]
pca_mortality_prs = pca_mortality_prs[:, year-45:]
# Probability of being screened
p_screened = np.array(params.uptake_prs * a_risk.loc[year,'p_above_threshold'])
p_ns = np.array((1-params.uptake_prs) * a_risk.loc[year,'p_above_threshold'])
p_nos = np.array(params.compliance * (1-a_risk.loc[year,'p_above_threshold']))
p_nos_screened = np.array((1-params.compliance) * (1-a_risk.loc[year,'p_above_threshold']))
if year < 55:  # note: never true here, since year iterates over 55..69
# Yearly probability of PCa incidence
p_pca_screened = params.pca_incidence[:, year-45:]
p_pca_ns = params.pca_incidence[:, year-45:]
p_pca_nos = params.pca_incidence[:, year-45:]
p_pca_nos_screened = params.pca_incidence[:, year-45:]
# Yearly probability of death from PCa
p_pca_death_screened = params.pca_death_baseline[:, year-45:]
p_pca_death_ns = params.pca_death_baseline[:, year-45:]
p_pca_death_nos = params.pca_death_baseline[:, year-45:]
p_pca_death_nos_screened = params.pca_death_baseline[:, year-45:]
# Proportion of cancers detected by screening at a localised / advanced stage
advanced_stage_sc = params.stage_adv_ns_psa[:, year-45:]
advanced_stage_ns = params.stage_adv_ns_psa[:, year-45:]
advanced_stage_nos_sc = params.stage_adv_ns_psa[:, year-45:]
advanced_stage_nos = params.stage_adv_ns_psa[:, year-45:]
localised_stage_sc = params.stage_local_ns_psa[:, year-45:]
localised_stage_ns = params.stage_local_ns_psa[:, year-45:]
localised_stage_nos_sc = params.stage_local_ns_psa[:, year-45:]
localised_stage_nos = params.stage_local_ns_psa[:, year-45:]
elif year > 54:
# Yearly probability of PCa incidence
p_pca_screened = pca_incidence_prs * a_risk.loc[year, 'rr_high']
p_pca_ns = params.pca_incidence[:, year-45:] * a_risk.loc[year,'rr_high']
p_pca_nos = params.pca_incidence[:, year-45:] * a_risk.loc[year,'rr_low']
p_pca_nos_screened = pca_incidence_prs * a_risk.loc[year,'rr_low']
# Yearly probability of death from PCa
p_pca_death_screened = pca_mortality_prs * a_risk.loc[year,'rr_high']
p_pca_death_ns = params.pca_death_baseline[:, year-45:] * a_risk.loc[year,'rr_high']
p_pca_death_nos = params.pca_death_baseline[:, year-45:] * a_risk.loc[year,'rr_low']
p_pca_death_nos_screened = pca_mortality_prs * a_risk.loc[year,'rr_low']
# Proportion of cancers detected by screening at a localised / advanced stage
advanced_stage_sc = params.stage_adv_screened_psa[:, year-45:]
localised_stage_sc = params.stage_local_screened_psa[:, year-45:]
advanced_stage_ns = params.stage_adv_ns_psa[:, year-45:]
localised_stage_ns = params.stage_local_ns_psa[:, year-45:]
advanced_stage_nos_sc = params.stage_adv_screened_psa[:, year-45:]
localised_stage_nos_sc = params.stage_local_screened_psa[:, year-45:]
advanced_stage_nos = params.stage_adv_ns_psa[:, year-45:]
localised_stage_nos = params.stage_local_ns_psa[:, year-45:]
mortality_other_causes = params.death_other_causes[:, year-45:]
tx_costs_local = params.tx_costs * params.tx.localised.values
tx_costs_adv = params.tx_costs * params.tx.advanced.values
#####################
# Year 1 in the model
#####################
age = np.arange(year,90)
length_df = len(age)
length_screen = len(np.arange(year,70)) # number of screening years depending on age cohort starting
# Cohorts, numbers 'healthy', and incident cases
cohort_sc = np.array([np.repeat(params.pop.loc[year, :], length_df)]*params.sims) * p_screened
cohort_ns = np.array([np.repeat(params.pop.loc[year, :], length_df)]*params.sims) * p_ns
cohort_nos = np.array([np.repeat(params.pop.loc[year, :], length_df)]*params.sims) * p_nos
cohort_nos_sc = np.array([np.repeat(params.pop.loc[year, :], length_df)]*params.sims) * p_nos_screened
pca_alive_sc = np.array([np.zeros(length_df)]*params.sims)
pca_alive_ns = np.array([np.zeros(length_df)]*params.sims)
pca_alive_nos = np.array([np.zeros(length_df)]*params.sims)
pca_alive_nos_sc = np.array([np.zeros(length_df)]*params.sims)
healthy_sc = cohort_sc - pca_alive_sc
healthy_ns = cohort_ns - pca_alive_ns
healthy_nos = cohort_nos - pca_alive_nos
healthy_nos_sc = cohort_nos_sc - pca_alive_nos_sc
pca_incidence_sc = healthy_sc * p_pca_screened
pca_incidence_nos_sc = healthy_nos_sc * p_pca_nos_screened
if year > 54:
pca_incidence_screened = pca_incidence_sc.copy() # Screen-detected cancers
pca_incidence_post_screening = np.array([np.zeros(length_df)]*params.sims) # Post-screening cancers - 0 until model reaches age 70.
pca_incidence_nos_sc_screened = pca_incidence_nos_sc.copy() # Screen-detected cancers
pca_incidence_nos_sc_post_screening = np.array([np.zeros(length_df)]*params.sims) # Post-screening cancers - 0 until model reaches age 70.
elif year < 55:
# Zero as no screening in any of these cohorts
pca_incidence_screened = np.array([np.zeros(length_df)]*params.sims)
pca_incidence_post_screening = np.array([np.zeros(length_df)]*params.sims)
pca_incidence_nos_sc_screened = np.array([np.zeros(length_df)]*params.sims)
pca_incidence_nos_sc_post_screening = np.array([np.zeros(length_df)]*params.sims)
pca_incidence_ns = healthy_ns * p_pca_ns
pca_incidence_nos = healthy_nos * p_pca_nos
# Deaths
pca_death_sc = ((pca_alive_sc * p_pca_death_screened)
+ (healthy_sc * p_pca_death_screened))
pca_death_ns = ((pca_alive_ns * p_pca_death_ns)
+ (healthy_ns * p_pca_death_ns))
pca_death_nos = ((pca_alive_nos * p_pca_death_nos)
+ (healthy_nos * p_pca_death_nos))
pca_death_nos_sc = ((pca_alive_nos_sc * p_pca_death_nos_screened)
+ (healthy_nos_sc * p_pca_death_nos_screened))
pca_death_other_sc = ((pca_incidence_sc
+ pca_alive_sc
- pca_death_sc)
* mortality_other_causes)
pca_death_other_ns = ((pca_incidence_ns
+ pca_alive_ns
- pca_death_ns)
* mortality_other_causes)
pca_death_other_nos = ((pca_incidence_nos
+ pca_alive_nos
- pca_death_nos)
* mortality_other_causes)
pca_death_other_nos_sc = ((pca_incidence_nos_sc
+ pca_alive_nos_sc
- pca_death_nos_sc)
* mortality_other_causes)
healthy_death_other_sc = (healthy_sc-pca_incidence_sc) * mortality_other_causes
healthy_death_other_ns = (healthy_ns-pca_incidence_ns) * mortality_other_causes
healthy_death_other_nos = (healthy_nos-pca_incidence_nos) * mortality_other_causes
healthy_death_other_nos_sc = (healthy_nos_sc-pca_incidence_nos_sc) * mortality_other_causes
total_death_sc = (pca_death_sc
+ pca_death_other_sc
+ healthy_death_other_sc)
total_death_ns = (pca_death_ns
+ pca_death_other_ns
+ healthy_death_other_ns)
total_death_nos = (pca_death_nos
+ pca_death_other_nos
+ healthy_death_other_nos)
total_death_nos_sc = (pca_death_nos_sc
+ pca_death_other_nos_sc
+ healthy_death_other_nos_sc)
total_death = (total_death_sc
+ total_death_ns
+ total_death_nos
+ total_death_nos_sc)
# Prevalent cases & life-years
pca_prevalence_sc = (pca_incidence_sc
- pca_death_sc
- pca_death_other_sc)
pca_prevalence_ns = (pca_incidence_ns
- pca_death_ns
- pca_death_other_ns)
pca_prevalence_nos = (pca_incidence_nos
- pca_death_nos
- pca_death_other_nos)
pca_prevalence_nos_sc = (pca_incidence_nos_sc
- pca_death_nos_sc
- pca_death_other_nos_sc)
lyrs_pca_sc_nodiscount = pca_prevalence_sc * 0.5
lyrs_pca_ns_nodiscount = pca_prevalence_ns * 0.5
lyrs_pca_nos_nodiscount = pca_prevalence_nos * 0.5
lyrs_pca_nos_sc_nodiscount = pca_prevalence_nos_sc * 0.5
# Costs
if year > 54:
costs_tx_sc = np.array([np.zeros(length_df)]*params.sims)
costs_tx_screened = np.array([np.zeros(length_df)]*params.sims)
costs_tx_post_screening = np.array([np.zeros(length_df)]*params.sims)
costs_tx_screened[:, 0] = ((pca_incidence_screened[:, 0]
* localised_stage_sc[:, 0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:, 0]
* advanced_stage_sc[:, 0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:, 0] = ((pca_incidence_post_screening[:, 0]
* localised_stage_ns[:, 0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:, 0]
* advanced_stage_ns[:, 0].T
* tx_costs_adv.T).sum(axis=0)
* params.relative_cost_clinically_detected[:, 0]) # cost of post-screening cancers
costs_tx_sc[:, 0] = costs_tx_screened[:, 0] + costs_tx_post_screening[:, 0] # total cost in screened arms
costs_tx_nos_sc = np.array([np.zeros(length_df)] * params.sims)
costs_tx_nos_sc_screened = np.array([np.zeros(length_df)] * params.sims)
costs_tx_nos_sc_post_screening = np.array([np.zeros(length_df)] * params.sims)
costs_tx_nos_sc_screened[:, 0] = ((pca_incidence_nos_sc_screened[:, 0]
* localised_stage_nos_sc[:, 0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_screened[:, 0]
* advanced_stage_nos_sc[:, 0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_nos_sc_post_screening[:, 0] = ((pca_incidence_nos_sc_post_screening[:, 0]
* localised_stage_nos[:, 0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_post_screening[:, 0]
* advanced_stage_nos[:, 0].T
* tx_costs_adv.T).sum(axis=0)
* params.relative_cost_clinically_detected[:, 0]) # cost of post-screening cancers
costs_tx_nos_sc[:, 0] = costs_tx_nos_sc_screened[:, 0] + costs_tx_nos_sc_post_screening[:, 0] # total cost in screened arms
elif year < 55:
costs_tx_sc = np.array([np.zeros(length_df)] * params.sims)
costs_tx_sc[:, 0] = ((pca_incidence_sc[:, 0]
* localised_stage_ns[:, 0].T # rr_adv_scr shouldn't apply as no screening < 55
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:, 0]
* advanced_stage_ns[:, 0].T
* tx_costs_adv.T).sum(axis=0)
* params.relative_cost_clinically_detected[:, 0])
costs_tx_nos_sc = np.array([np.zeros(length_df)] * params.sims)
import json
import logging
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from gensim import matutils
from gensim.matutils import kullback_leibler, jensen_shannon
from gensim.models import CoherenceModel
from matplotlib.ticker import MaxNLocator
from gensim.models.ldamulticore import LdaMulticore
from tqdm.auto import tqdm
from operator import itemgetter
import itertools
from octis.models.NeuralLDA import NeuralLDA
import train_lda
def diff(topic_model_1, topic_model_2, distance="jensen_shannon", normed=True):
"""Calculate the difference in topic distributions between two models.
Parameters
----------
topic_model_1 : numpy.ndarray
The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`).
topic_model_2 : numpy.ndarray
The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`).
distance : {'kullback_leibler', 'jensen_shannon'}
The distance metric to calculate the difference with.
normed : bool, optional
Whether the matrix should be normalized or not.
Returns
-------
numpy.ndarray
A difference matrix. Each element is the distance between a pair of topics,
shape (`num_topics_1`, `num_topics_2`)
"""
distances = {
"kullback_leibler": kullback_leibler,
"jensen_shannon": jensen_shannon
}
if distance not in distances:
valid_keys = ", ".join("`{}`".format(x) for x in distances.keys())
raise ValueError("Incorrect distance, valid only {}".format(valid_keys))
distance_func = distances[distance]
# retrieve topic-word distributions from topic models
if isinstance(topic_model_1, LdaMulticore) and isinstance(topic_model_2, LdaMulticore):
d1, d2 = topic_model_1.get_topics(), topic_model_2.get_topics()
elif isinstance(topic_model_1, NeuralLDA) and isinstance(topic_model_2, NeuralLDA):
d1, d2 = topic_model_1.model.get_topic_word_mat(), topic_model_2.model.get_topic_word_mat()
else:
raise ValueError(f">> Error: topic models are not the same instance")
t1_size, t2_size = d1.shape[0], d2.shape[0]
# initialize z
z = np.zeros((t1_size, t2_size))
# iterate over each cell in the initialized z
for topic in np.ndindex(z.shape):
topic1 = topic[0]
topic2 = topic[1]
# calculate jensen-shannon distance
z[topic] = distance_func(d1[topic1], d2[topic2])
if normed:
if np.abs(np.max(z)) > 1e-8:
z /= np.max(z)
return z
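# Usage sketch (hypothetical models): for two trained LdaMulticore models,
#   z = diff(lda_a, lda_b)   # z[i, j] = JS distance between topic i of lda_a and topic j of lda_b
# z.min(axis=1) then gives, for each topic of lda_a, its closest match in lda_b.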
def score_by_topic_coherence(model, texts, corpus, dictionary, topn=20):
"""
Calculates the c_v coherence score.
Note: this is not stable if texts/corpus contain empty documents or words that never appear in the corpus.
Solution: remove all empty documents on load, and edit log_ratio_measure() in direct_confirmation_measure.py and
_cossim() in indirect_confirmation_measure.py in gensim.topic_coherence, since there can be a division by zero (add EPSILON to the denominator).
:param model: LdaModel or NeuralLDA model
:param texts: tokenized documents (list of list of str)
:param corpus: bag-of-words corpus (list of list of (int, int))
:param dictionary: dictionary
:param topn: int, optional
Integer corresponding to the number of top words to be extracted from each topic.
:return: int, the score
"""
# retrieve topic-word distributions from topic models
if isinstance(model, LdaMulticore):
topics = model.get_topics()
elif isinstance(model, NeuralLDA):
topics = model.model.get_topic_word_mat()
else:
raise ValueError(f">> Error: topic model instance not defined")
topics_ = [matutils.argsort(topic, topn=topn, reverse=True) for topic in topics]
# calculate coherence score
score = CoherenceModel(processes=48, topics=topics_, texts=texts, corpus=corpus, dictionary=dictionary, coherence='c_v', topn=topn).get_coherence()
return score
def score_by_topic_corpus_probability(topic_model_1, topic_model_2, corpus_1=None, corpus_2=None, documents_1=None, documents_2=None, distance='jensen_shannon'):
"""
Calculates the score by 'distance' and weights the strongest similarities by their topic probability over the whole corpus.
Note: 1. This is not stable for neural topic models, as they are very sensitive to changes in the magnitude of the input values they were trained on:
a model trained on a word count of 1000 is unreliable when predicting on a word count of 10000.
2. The difference between using filtered and unfiltered documents is less than 0.03.
:param topic_model_1: LdaModel or NeuralLDA model
:param topic_model_2: LdaModel or NeuralLDA model
:param corpus_1: list of (int, int)
:param corpus_2: list of (int, int)
:param documents_1: list of str
:param documents_2: list of str
:param distance: str, distance, e.g. jensen_shannon
:return: float, the tp score
"""
# calculate the difference matrix
mdiff1 = diff(topic_model_1, topic_model_2, distance=distance)
mdiff2 = diff(topic_model_2, topic_model_1, distance=distance)
# select the best match
min1 = np.amin(mdiff1, axis=1) # smaller ~ more similar, take the most similar score for each topic
min2 = np.amin(mdiff2, axis=1) # smaller ~ more similar, take the most similar score for each topic
if isinstance(topic_model_1, LdaMulticore) and isinstance(topic_model_2, LdaMulticore):
assert topic_model_1.num_topics == topic_model_2.num_topics, ">> ERROR: Not the same amount of topics"
assert corpus_1 is not None and corpus_2 is not None, ">> ERROR: At least one 'corpus' is None"
num_topics = topic_model_1.num_topics
topic_corpus_prob_1 = np.zeros(num_topics)
topic_corpus_prob_2 = np.zeros(num_topics)
# Get topic probability distribution for the whole corpus
probas_1 = topic_model_1.get_document_topics(list(itertools.chain.from_iterable(corpus_1)), minimum_probability=0.0)
probas_2 = topic_model_2.get_document_topics(list(itertools.chain.from_iterable(corpus_2)), minimum_probability=0.0)
for key, val in probas_1:
topic_corpus_prob_1[key] = val
for key, val in probas_2:
topic_corpus_prob_2[key] = val
elif isinstance(topic_model_1, NeuralLDA) and isinstance(topic_model_2, NeuralLDA):
assert topic_model_1.model.num_topics == topic_model_2.model.num_topics, ">> ERROR: Not the same amount of topics"
assert documents_1 is not None and documents_2 is not None, ">> ERROR: At least one 'documents' is None"
# Concatenate all documents and calculate topic probability distribution for the whole corpus
data_corpus = [' '.join(list(itertools.chain.from_iterable(documents_1)))]
x_train, input_size = topic_model_1.preprocess(topic_model_1.vocab, train=data_corpus)
topic_corpus_prob_1 = topic_model_1.model.get_thetas(x_train).T
data_corpus = [' '.join(list(itertools.chain.from_iterable(documents_2)))]
x_train, input_size = topic_model_2.preprocess(topic_model_2.vocab, train=data_corpus)
topic_corpus_prob_2 = topic_model_2.model.get_thetas(x_train).T
else:
raise ValueError(">> Error: topic models are not the same instance")
# weigh the best matches by their probability and take the mean of both
return (np.matmul(topic_corpus_prob_1, min1) + np.matmul(topic_corpus_prob_2, min2)) / 2
def score_by_top_topic_corpus_probability(topic_model_1, topic_model_2, corpus_1, corpus_2, distance='jensen_shannon'):
"""
Calculates the score by 'distance' and weights the strongest similarities
by the respective normed sum of the most probable topic for each document over the whole corpus
:param topic_model_1: LdaModel or NeuralLDA model
:param topic_model_2: LdaModel or NeuralLDA model
:param corpus_1: list of (int, int)
:param corpus_2: list of (int, int)
:param distance: str, distance, e.g. jensen_shannon
:return: float, the tt score
"""
# calculate the difference matrix
mdiff1 = diff(topic_model_1, topic_model_2, distance=distance)
mdiff2 = diff(topic_model_2, topic_model_1, distance=distance)
min1 = np.amin(mdiff1, axis=1)
min2 = np.amin(mdiff2, axis=1)
if isinstance(topic_model_1, LdaMulticore) and isinstance(topic_model_2, LdaMulticore):
from pathos.multiprocessing import ProcessingPool as Pool
def prob_list(topic_model, corpus):
# helper function to retrieve the most probable topic per document
cnt = np.zeros(topic_model.num_topics)
for doc in corpus:
topic_prob_list = topic_model.get_document_topics(doc, minimum_probability=0.0)
topic_prob_tupel = max(topic_prob_list, key=itemgetter(1))
cnt[topic_prob_tupel[0]] += 1
return cnt
workers = 8
pool = Pool(ncpus=workers)
# retrieve the most probable topic per document
logging.info("First split")
cnt_split = pool.map(lambda x: prob_list(topic_model_1, x), list(train_lda.split(corpus_1, workers)))
cnt1 = np.sum(cnt_split, axis=0)
assert len(cnt1) == topic_model_1.num_topics, f">> ERROR: Count changed to {len(cnt1)}"
logging.info("Second split")
cnt_split = pool.map(lambda x: prob_list(topic_model_2, x), list(train_lda.split(corpus_2, workers)))
cnt2 = np.sum(cnt_split, axis=0)
assert len(cnt2) == topic_model_2.num_topics
elif isinstance(topic_model_1, NeuralLDA) and isinstance(topic_model_2, NeuralLDA):
num_topics = topic_model_1.model.num_topics
cnt1 = np.zeros(num_topics)
cnt2 = np.zeros(num_topics)
# retrieve the most probable topic per document
topic_prob_list = topic_model_1.model.get_thetas(topic_model_1.model.train_data)
for i in topic_prob_list.argmax(axis=1):
cnt1[i] += 1
topic_prob_list = topic_model_2.model.get_thetas(topic_model_2.model.train_data)
for i in topic_prob_list.argmax(axis=1):
cnt2[i] += 1
else:
raise ValueError(">> Error: topic models are not the same instance")
# normalize the topic counts by the total number of documents
topic_corpus_prob_1 = cnt1 / np.sum(cnt1)
topic_corpus_prob_2 = cnt2 / np.sum(cnt2)
# weigh the best matches by their topic probability and take the mean of both
return (np.matmul(topic_corpus_prob_1, min1) + np.matmul(topic_corpus_prob_2, min2)) / 2
from env_common import get_screen
from common import select_action_policy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import RMSprop
from itertools import count
import numpy as np
import gym
import visdom
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
printout_freq = 1
num_episodes = 300
batch_size = 5
learning_rate_policy = 0.001
learning_rate_value = 0.001
gamma = 0.99
lam = 0.99 # lambda for GAE-lambda
train_v_iters = 10
train_pi_iters = 10
clip_ratio=0.1 # how far can new policy deviate from old policy
# Initialize visualization
viz = visdom.Visdom()
loss_window = viz.line(
Y=torch.zeros((1)),
X=torch.zeros((1)),
opts=dict(xlabel='step', ylabel='Loss', title='Training loss'))
episode_length_window = viz.line(
Y=torch.zeros((1)),
X=torch.zeros((1)),
opts=dict(xlabel='step', ylabel='Episode length', title='Episode length'))
# Initialize environment and replay buffer
env = gym.make("CartPole-v0")
env.reset()
init_screen = get_screen(env)
_, _, screen_height, screen_width = init_screen.shape
num_actions = env.action_space.n
class Buffer:
def __init__(self, gamma, lam):
self.buffer = []
self.advantages = []
self.discounted_rewards = []
self.gamma = gamma
self.lam = lam
def add(self, state, action, logp, value, reward):
self.buffer.append((state, action, logp, value, reward))
def get(self, i):
"""Return state, action, log probability of action, discounted advantage and discounted reward at i.
Requires that finalize() has been called previously to calculate
discounted rewards.
"""
if i >= len(self.buffer) or i >= len(self.advantages) or i >= len(self.discounted_rewards):
return None
else:
state, action, logp, _, _ = self.buffer[i]
reward = self.discounted_rewards[i]
advantage = self.advantages[i]
return state, torch.FloatTensor([action]).to(device), logp, advantage, reward
def finalize(self):
"""Call at end of sample collection to calculate advantages and discounted rewards.
"""
_, _, _, values, rewards = zip(*self.buffer)
# Calculate advantages
self.advantages = [0] * len(self.buffer)
for i in range(len(self.advantages)-1):
if rewards[i] != 0: # if reward is zero, we ended the episode
delta = rewards[i] + self.gamma * values[i+1] - values[i]
self.advantages[i] = delta.item()
# Discount advantages
running_add = 0
for i in reversed(range(len(self.advantages))):
if self.advantages[i] == 0:
running_add = 0
else:
running_add = running_add * self.gamma * self.lam + self.advantages[i]
self.advantages[i] = running_add
# Normalize advantages
adv_mean = np.mean(self.advantages)
adv_std = np.std(self.advantages)
for i in range(len(self.advantages)):
self.advantages[i] = (self.advantages[i] - adv_mean) / adv_std
# Calculate discounted rewards
self.discounted_rewards = [0] * len(self.buffer)
running_add = 0
for i in reversed(range(len(self.discounted_rewards))):
if rewards[i] == 0:
running_add = 0
else:
running_add = running_add * self.gamma + rewards[i]
self.discounted_rewards[i] = running_add
def empty(self):
self.buffer = []
self.advantages = []
self.discounted_rewards = []
buffer = Buffer(gamma, lam)
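# Worked sketch of Buffer.finalize (hypothetical numbers): with gamma = 0.99 and
# rewards [1, 1, 0] (a 0 reward marks an episode boundary), the discounted
# rewards come out as [1 + 0.99 * 1.0, 1.0, 0.0] = [1.99, 1.0, 0.0]; the
# running sum resets whenever a 0 reward is met.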
class PolicyNet(nn.Module):
def __init__(self, h, w, outputs):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
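        # e.g. a 40-pixel input with kernel 5 and stride 2 gives (40 - 4 - 1) // 2 + 1 = 18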
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
policy_network = PolicyNet(screen_height, screen_width, num_actions).to(device)
value_network = PolicyNet(screen_height, screen_width, 1).to(device)
optimizer_policy = RMSprop(policy_network.parameters(), lr=learning_rate_policy)
optimizer_value = RMSprop(value_network.parameters(), lr=learning_rate_value)
# Store duration of episodes to test performance
episode_durations = []
# Training loop
steps = 0
training_step = 0
for episode in range(num_episodes):
env.reset()
last_screen = get_screen(env)
current_screen = get_screen(env)
state = current_screen - last_screen
state = state.to(device)
for t in count():
action, logp, val = select_action_policy(state, policy_network, value_network)
_, reward, done, _ = env.step(action)
# Move to next state
last_screen = current_screen
current_screen = get_screen(env)
next_state = current_screen - last_screen
next_state = next_state.to(device)
# To mark boundaries between episodes
if done:
reward = 0
buffer.add(state, float(action), logp, val, reward)
state = next_state
steps += 1
if done:
episode_durations.append(t + 1)
viz.line(X=torch.ones((1, 1))*episode, Y=torch.ones((1, 1)) * episode_durations[-1],
win=episode_length_window, update='append', name='Episode durations')
# Plot 50 episode averages
if len(episode_durations) >= 50:
mean = np.mean(episode_durations[-50:])
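                # (hypothetical completion -- the snippet is truncated here; plot the
                # running 50-episode average in the same window as the raw durations)
                viz.line(X=torch.ones((1, 1)) * episode, Y=torch.ones((1, 1)) * mean,
                         win=episode_length_window, update='append', name='50-episode mean')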
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Demo of using VoteNet 3D object detector to detect objects from a point cloud.
"""
import os
import sys
import numpy as np
import argparse
import importlib
import time
import glob
import open3d as o3d
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'models'))
import pc_util
from ap_helper import parse_predictions
from dump_helper import softmax
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import DC
class App:
def __init__(self, args):
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
self.flag_exit = False
self.visualize = args.visualize
self.save_data = args.save_data
self.output_dir = args.output_dir
self.kinect_setup(args)
self.num_point = args.num_point
self.DUMP_CONF_THRESH = args.threshold # Dump boxes with obj prob larger than that.
self.fx, self.fy, self.cx, self.cy = [600.5037841796875, 600.29217529296875, 639.47564697265625, 365.94244384765625]
self.eval_config_dict = {'remove_empty_box': True, 'use_3d_nms': True, 'nms_iou': 0.25,
'use_old_type_nms': False, 'cls_nms': False, 'per_class_proposal': False,
'conf_thresh': self.DUMP_CONF_THRESH, 'dataset_config': DC}
demo_dir = os.path.join(BASE_DIR, 'demo_files')
if args.pretrained_model == 'sunrgbd':
checkpoint_path = os.path.join(demo_dir, 'pretrained_votenet_on_sunrgbd.tar')
else:
checkpoint_path = os.path.join(demo_dir, 'pretrained_votenet_on_scannet.tar')
# Init the model and optimizer
self.MODEL = importlib.import_module('votenet') # import network module
self.torch_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net = self.MODEL.VoteNet(num_proposal=256, input_feature_dim=1, vote_factor=1,
sampling='seed_fps', num_class=DC.num_class,
num_heading_bin=DC.num_heading_bin,
num_size_cluster=DC.num_size_cluster,
mean_size_arr=DC.mean_size_arr).to(self.torch_device)
logging.info('Constructed model.')
# Load checkpoint
optimizer = optim.Adam(self.net.parameters(), lr=0.001)
checkpoint = torch.load(checkpoint_path)
self.net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
logging.info("Loaded checkpoint %s (epoch: %d)"%(checkpoint_path, epoch))
self.net.eval() # set model to eval mode (for bn and dp)
def preprocess_point_cloud(self, point_cloud):
''' Prepare the numpy point cloud (N,3) for forward pass '''
point_cloud = point_cloud[:,0:3] # do not use color for now
floor_height = np.percentile(point_cloud[:,2],0.99)
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse, json
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
from matplotlib.offsetbox import *
from PIL import Image
from utils.experiments import load_data
def load_image(path):
img = Image.open(path)
img = img.resize((32, 32))
return np.array(img).squeeze()
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='tSNE Plot')
arg_parser.add_argument('task', choices=['mnist', 'cifar', 'bam'], help='name of the task (mnist, cifar)')
arg_parser.add_argument('data_path', help='path to data (not required for original MNIST)')
arg_parser.add_argument('data_split', choices=['train', 'test'], default='test', help='data split (train, test (default))')
arg_parser.add_argument('latent_path', help='path to numpy latent vectors')
arg_parser.add_argument('out_path', help='path to output')
arg_parser.add_argument('--num_points', type=int, default=1000, help='number of data points to plot (default: 1000)')
arg_parser.add_argument('--remove_outliers', type=float, default=0., help='removes outliers outside of n times the standard deviation (default: False)')
arg_parser.add_argument('--eval_task', default='', help='if path to eval JSON is provided, only data points from the task are plotted (default: None)')
arg_parser.add_argument('--tsne_latents', default='', help='if path to tSNE latents is provided, repeated projection will be skipped (default: None)')
args = arg_parser.parse_args()
# load data
images, labels, label_descs, num_labels = load_data(args.task, split=args.data_split, data_path=args.data_path)
cls_idx_map = [i for i in range(num_labels)]
latents = np.load(args.latent_path)
print("Loaded %d latents with dimensionality %d." % (latents.shape[0], latents.shape[1]))
# tSNE
if len(args.tsne_latents) > 0:
tsne_latents = np.load(args.tsne_latents)
print("Loaded tSNE latents from '%s'." % args.tsne_latents)
else:
tsne_model = TSNE(n_components=2, verbose=True)
tsne_latents = tsne_model.fit_transform(latents)
# save transformation
latents_name, latents_ext = os.path.splitext(os.path.basename(args.latent_path))
tsne_path = os.path.join(args.out_path, '%s_tsne%s' % (latents_name, latents_ext))
np.save(tsne_path, tsne_latents)
print("Saved tSNE latents to '%s'." % tsne_path)
# create subset
if len(args.eval_task) > 0:
eval_task = json.load(open(args.eval_task, 'r', encoding='utf8'))
eval_idcs = eval_task['examples'] + [i for task in eval_task['tasks'] for i in task['options']]
other_idcs = [i for i in range(tsne_latents.shape[0]) if i not in eval_idcs]
other_idcs = np.random.choice(other_idcs, args.num_points - len(eval_idcs), replace=False)
subset_idcs = np.concatenate((other_idcs, eval_idcs))
print("Loaded %d data points from eval task '%s'." % (len(eval_idcs), args.eval_task))
else:
subset_idcs = np.random.choice(tsne_latents.shape[0], args.num_points, replace=False)
print("Reduced data points to random subset of size %d." % args.num_points)
tsne_latents = tsne_latents[subset_idcs]
labels = labels[subset_idcs]
# shorten class names for BAM
if args.task == 'bam':
dmap = {'emotion_gloomy': 'g', 'emotion_happy': 'h', 'emotion_peaceful': 'p', 'emotion_scary': 's', 'unspecified': 'u'}
label_descs = [''.join([dmap[e] for e in d.split('+')]) for d in label_descs]
eval_task['classes'] = [''.join([dmap[e] for e in d.split('+')]) for d in eval_task['classes']]
# init alphas
alphas = np.zeros(tsne_latents.shape[0])
# calculate means
mean_latent = np.mean(tsne_latents, axis=0)
std_latent = np.std(tsne_latents, axis=0)
mean_latents = np.zeros([len(label_descs), tsne_latents.shape[1]])
for c in range(num_labels):
lbl_idcs = np.where(labels == (cls_idx_map[c] * np.ones_like(labels)))
mean_latents[c] = np.mean(tsne_latents[lbl_idcs], axis=0)
# calculate alphas
if len(lbl_idcs[0]) > 1:
dists = np.abs(tsne_latents[lbl_idcs] - mean_latents[c])
dists = -np.sum(dists, axis=1)
max_dist = np.max(dists)
alphas[lbl_idcs] = np.clip(dists * (1 / max_dist), .3, None)
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import pickle as pkl
import os
import h5py
import pandas as pd
import pdb
from data_utils import load_data, map_data, download_dataset
def normalize_features(feat):
    # row degrees; zeros set to inf to avoid dividing by zero
    degree = np.asarray(feat.sum(1)).flatten()
    degree[degree == 0.] = np.inf
    degree_inv = 1. / degree
    degree_inv_mat = sp.diags([degree_inv], [0])
    feat_norm = degree_inv_mat.dot(feat)
    if feat_norm.nnz == 0:
        print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
    return feat_norm
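# Usage sketch (hypothetical matrix): each row of the result sums to 1,
# e.g. [[1., 1.], [2., 0.]] row-normalizes to [[0.5, 0.5], [1., 0.]].
_feat_demo = normalize_features(sp.csr_matrix(np.array([[1., 1.], [2., 0.]])))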
def normalize_edge_features_3D(feat):
    # degree over the last axis; zeros set to inf to avoid dividing by zero
    degree = feat.sum(2).reshape(-1)
    degree[degree == 0.] = np.inf
    degree_inv = 1. / degree
    degree_inv_mat = sp.diags([degree_inv], [0])
    feat_r = np.reshape(feat, ((feat.shape[0] * feat.shape[1]), feat.shape[2]))
    feat_norm = degree_inv_mat.dot(feat_r)
    if np.count_nonzero(feat_norm) == 0:
        print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
    feat_norm = feat_norm.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
    return feat_norm
def normalize_edge_features_2D(feat):
    # row degrees; zeros set to inf to avoid dividing by zero
    degree = feat.sum(1).reshape(-1)
    degree[degree == 0.] = np.inf
    degree_inv = 1. / degree
    degree_inv_mat = sp.diags([degree_inv], [0])
    feat_norm = degree_inv_mat.dot(feat)
    if np.count_nonzero(feat_norm) == 0:
        print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
    return feat_norm
def normalize_edge_features_3Dto_2D(feat):
    prob_r = [0.2, 0.3, 0.5, 0.7, 0.8]
    adj_tot = [np.sum(adj, axis=2) for adj in feat]
    # scale each adjacency matrix by its rating-class weight
    adjacencies_prioritize = [adj * p for adj, p in zip(adj_tot, prob_r)]
    adj_sp = [sp.csr_matrix(adj) for adj in adjacencies_prioritize]
    adj_sp = globally_normalize_bipartite_adjacency(adj_sp)
    return adj_sp
def load_matlab_file(path_file, name_field):
"""
load '.mat' files
inputs:
path_file, string containing the file path
name_field, string containig the field name (default='shape')
warning:
'.mat' files should be saved in the '-v7.3' format
"""
db = h5py.File(path_file, 'r')
ds = db[name_field]
try:
if 'ir' in ds.keys():
data = np.asarray(ds['data'])
ir = np.asarray(ds['ir'])
jc = np.asarray(ds['jc'])
out = sp.csc_matrix((data, ir, jc)).astype(np.float32)
except AttributeError:
# Transpose in case is a dense matrix because of the row- vs column- major ordering between python and matlab
out = np.asarray(ds).astype(np.float32).T
db.close()
return out
def preprocess_user_item_features(u_features, v_features):
"""
Creates one big feature matrix out of user features and item features.
Stacks item features under the user features.
"""
zero_csr_u = sp.csr_matrix((u_features.shape[0], v_features.shape[1]), dtype=u_features.dtype) #121 x 1232
zero_csr_v = sp.csr_matrix((v_features.shape[0], u_features.shape[1]), dtype=v_features.dtype) # 1232 x 121
u_features = sp.hstack([u_features, zero_csr_u], format='csr') # 121 x 121 stack 121 x 1232= 121 x [121 + 1232]
v_features = sp.hstack([zero_csr_v, v_features], format='csr') # 1232 x 121 stack 1232 x 1232= 1232 x [121 + 1232]
return u_features, v_features
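# Usage sketch (hypothetical sizes): with 3 users x 4 user-features and
# 5 items x 6 item-features, both outputs live in one (4 + 6)-dim feature space.
_u_demo = sp.csr_matrix(np.ones((3, 4)))
_v_demo = sp.csr_matrix(np.ones((5, 6)))
_u_out, _v_out = preprocess_user_item_features(_u_demo, _v_demo)  # shapes (3, 10) and (5, 10)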
def globally_normalize_bipartite_adjacency(adjacencies, verbose=False, symmetric=True):
""" Globally Normalizes set of bipartite adjacency matrices """
#a=isinstance(adjacencies,list) #true
if verbose:
print('Symmetrically normalizing bipartite adj')
# degree_u and degree_v are row and column sums of adj+I
adj_tot = sum(adj for adj in adjacencies)
degree_u = np.asarray(adj_tot.sum(1)).flatten()
degree_v = np.asarray(adj_tot.sum(0)).flatten()
# set zeros to inf to avoid dividing by zero
degree_u[degree_u == 0.] = np.inf
degree_v[degree_v == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(degree_u) # 1 /sqroot degree of u
degree_v_inv_sqrt = 1. / np.sqrt(degree_v) # 1 /sqroot degree of v
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
degree_u_inv = degree_u_inv_sqrt_mat.dot(degree_u_inv_sqrt_mat)
if symmetric:
#print("yes sym") called for ml _100k
adj_norm = [degree_u_inv_sqrt_mat.dot(adj).dot(degree_v_inv_sqrt_mat) for adj in adjacencies]
else:
adj_norm = [degree_u_inv.dot(adj) for adj in adjacencies]
return adj_norm
def globally_normalize_tripartite_adjacency_matrix(adjacencies, verbose=False, symmetric=True):
""" Globally Normalizes set of bipartite adjacency matrices """
# a=isinstance(adjacencies,list) #true
if verbose:
print('Symmetrically normalizing bipartite adj')
# degree_u and degree_v are row and column sums of adj+I
adjacencies_uv_c= [np.reshape(adj,(adj.shape[0]*adj.shape[1], adj.shape[2])) for adj in adjacencies]
print(f"adjacencies_uv_c.shape[0] {adjacencies_uv_c[0].shape}")
adj_tot = sum(adj for adj in adjacencies)
adj_tot_uv_c=np.reshape(adj_tot, (adj_tot.shape[0]*adj_tot.shape[1], adj_tot.shape[2]))
adj_tot_t=np.transpose(adj_tot, (1,0,2))
adj_tot_vu_c = np.reshape(adj_tot_t, (adj_tot_t.shape[0] * adj_tot_t.shape[1], adj_tot_t.shape[2]))
degree_uv_c = np.asarray(adj_tot_uv_c.sum(1)).flatten()
degree_vu_c = np.asarray(adj_tot_vu_c.sum(1)).flatten()
# set zeros to inf to avoid dividing by zero
degree_uv_c[degree_uv_c == 0.] = np.inf
degree_vu_c[degree_vu_c == 0.] = np.inf
degree_uv_inv_sqrt = 1. / np.sqrt(degree_uv_c) # 1 /sqroot degree of u
degree_vu_inv_sqrt = 1. / np.sqrt(degree_vu_c) # 1 /sqroot degree of v
degree_uv_inv_sqrt_mat = sp.diags([degree_uv_inv_sqrt], [0])
degree_vu_inv_sqrt_mat = sp.diags([degree_vu_inv_sqrt], [0])
degree_uv_inv = degree_uv_inv_sqrt_mat.dot(degree_uv_inv_sqrt_mat)
degree_vu_inv = degree_vu_inv_sqrt_mat.dot(degree_vu_inv_sqrt_mat)
if symmetric:
# print("yes sym") called for ml _100k
adj_norm_uv_c = [degree_uv_inv_sqrt_mat.dot(adj) for adj in adjacencies_uv_c]
adj_norm_u_v_c=[np.reshape(adj, (adjacencies[0].shape[0],adjacencies[0].shape[1], adjacencies[0].shape[2])) for adj in adj_norm_uv_c]
adj_norm_v_u_c = [np.transpose(adj, (1, 0, 2)) for adj in adj_norm_u_v_c]
adj_norm_vu_c=[np.reshape(adj, (adj.shape[0]*adj.shape[1], adj.shape[2])) for adj in adj_norm_v_u_c]
adj_norm_vu_c = [degree_vu_inv_sqrt_mat.dot(adj) for adj in adj_norm_vu_c]
adj_norm = [np.reshape(adj,(adjacencies[0].shape[0], adjacencies[0].shape[1], adjacencies[0].shape[2])) for adj in adj_norm_vu_c]
else:
adj_norm = [degree_uv_inv.dot(adj) for adj in adjacencies]
print(f"adj_normc {adj_norm[0].shape}")
return adj_norm
def user_context_adjacency(adjacencies):
""" Find importance of context for users
giving high probability to context with rating 5
"""
print(f"I am user_context_adjacency {type(adjacencies)} adjacencies[0].shape {adjacencies[0].shape}")
adj_tot = sum(adj for adj in adjacencies)
deg_u=np.sum(adj_tot, axis = 1)
deg_u = np.sum(deg_u, axis=1)
print(f"degree_u {deg_u.shape}")
# set zeros to inf to avoid dividing by zero
deg_u[deg_u == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(deg_u)
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
adju_c=[np.sum(adj, axis=1) for adj in adjacencies]
adju_c_norm = [degree_u_inv_sqrt_mat.dot(adj) for adj in adju_c]
#normalize this matrix by dividing by the square root of the degree
#print(f"degree_u_inv_sqrt_mat shape {degree_u_inv_sqrt_mat.shape} {degree_u_inv_sqrt_mat}")
    prob_r = [0.2, 0.3, 0.5, 0.7, 0.8]
    # scale each per-rating adjacency by its class weight, then accumulate
    adjacencies_prioritize = [adj * p for adj, p in zip(adju_c_norm, prob_r)]
    adju_c_imp = sum(adj for adj in adjacencies_prioritize)
#adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adju_c_imp
def item_context_adjacency(adjacencies):
""" Find importance of context for items
giving high probability to context with rating 5
"""
adj_tot = sum(adj for adj in adjacencies)
deg_v = np.sum(adj_tot, axis=1)
deg_v = np.sum(deg_v, axis=1)
# set zeros to inf to avoid dividing by zero
deg_v[deg_v == 0.] = np.inf
degree_v_inv_sqrt = 1. / np.sqrt(deg_v)
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
adjv_c = [np.sum(adj, axis=1) for adj in adjacencies]
adjv_c_norm = [degree_v_inv_sqrt_mat.dot(adj) for adj in adjv_c]
# normalize this matrix by dividing by the square root of the degree
    prob_r = [0.2, 0.3, 0.5, 0.7, 0.8]
    # scale each per-rating adjacency by its class weight, then accumulate
    adjacencies_prioritize = [adj * p for adj, p in zip(adjv_c_norm, prob_r)]
    adjv_c_imp = sum(adj for adj in adjacencies_prioritize)
# adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adjv_c_imp
def user_context_fromedge(adjacency):
""" Find importance of context for users
giving high probability to context with rating 5
"""
deg_u = np.sum(adjacency, axis=1)
deg_u = np.sum(deg_u, axis=1)
print(f"degree_u {deg_u.shape}")
# set zeros to inf to avoid dividing by zero
deg_u[deg_u == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(deg_u)
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
adju_c = np.sum(adjacency, axis=1)
adju_c_norm = degree_u_inv_sqrt_mat.dot(adju_c)
# normalize this matrix by dividing by the square root of the degree
return adju_c_norm
def item_context_fromedge(adjacency):
""" Find importance of context for users
giving high probability to context with rating 5
"""
deg_v = np.sum(adjacency, axis=1)
deg_v = np.sum(deg_v, axis=1)
# set zeros to inf to avoid dividing by zero
deg_v[deg_v == 0.] = np.inf
degree_v_inv_sqrt = 1. / np.sqrt(deg_v)
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
adjv_c = np.sum(adjacency, axis=1)
adjv_c_norm = degree_v_inv_sqrt_mat.dot(adjv_c)
# adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adjv_c_norm
def sparse_to_tuple(sparse_mx):
""" change of format for sparse matrix. This format is used
for the feed_dict where sparse matrices need to be linked to placeholders
representing sparse matrices. """
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
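# Usage sketch: a 2x2 matrix with a single entry 3.0 at (0, 1) yields
# coords [[0, 1]], values [3.], shape (2, 2).
_coords, _values, _shape = sparse_to_tuple(sp.csr_matrix(np.array([[0., 3.], [0., 0.]])))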
def create_trainvaltest_split(dataset, seed=1234, testing=False, datasplit_path=None, datasplit_from_file=False,
verbose=True, rating_map=None, post_rating_map=None, ratio=1.0):
"""
Splits data set into train/val/test sets from full bipartite adjacency matrix. Shuffling of dataset is done in
load_data function.
For each split computes 1-of-num_classes labels. Also computes training
adjacency matrix.
"""
if datasplit_from_file and os.path.isfile(datasplit_path):
print('Reading dataset splits from file...')
with open(datasplit_path, 'rb') as f:
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features = pkl.load(f)
if verbose:
print('Number of users = %d' % num_users)
print('Number of items = %d' % num_items)
print('Number of links = %d' % ratings.shape[0])
print('Fraction of positive links = %.4f' % (float(ratings.shape[0]) / (num_users * num_items),))
else:
print(f"I am preprocessing {dataset} ")
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features, edge_feactures,sim_users = load_data(dataset, seed=seed,
verbose=verbose)
with open(datasplit_path, 'wb') as f:
pkl.dump([num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features], f)
if rating_map is not None:
for i, x in enumerate(ratings):
ratings[i] = rating_map[x]
neutral_rating = -1
rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}
labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings])
labels = labels.reshape([-1])
# number of test and validation edges
num_test = int(np.ceil(ratings.shape[0] * 0.1))
    # validation set size: 5% of the remaining 90% of the ratings,
    # regardless of dataset
    num_val = int(np.ceil(ratings.shape[0] * 0.9 * 0.05))
num_train = ratings.shape[0] - num_val - num_test
pairs_nonzero = np.array([[u, v] for u, v in zip(u_nodes, v_nodes)])
idx_nonzero = np.array([u * num_items + v for u, v in pairs_nonzero])
train_idx = idx_nonzero[0:int(num_train * ratio)]
val_idx = idx_nonzero[num_train:num_train + num_val]
test_idx = idx_nonzero[num_train + num_val:]
train_pairs_idx = pairs_nonzero[0:int(num_train * ratio)]
val_pairs_idx = pairs_nonzero[num_train:num_train + num_val]
test_pairs_idx = pairs_nonzero[num_train + num_val:]
u_test_idx, v_test_idx = test_pairs_idx.transpose()
u_val_idx, v_val_idx = val_pairs_idx.transpose()
u_train_idx, v_train_idx = train_pairs_idx.transpose()
# create labels
train_labels = labels[train_idx]
val_labels = labels[val_idx]
test_labels = labels[test_idx]
if testing:
u_train_idx = np.hstack([u_train_idx, u_val_idx])
v_train_idx = np.hstack([v_train_idx, v_val_idx])
train_labels = np.hstack([train_labels, val_labels])
# for adjacency matrix construction
        train_idx = np.hstack([train_idx, val_idx])
import os
import numpy as np
import ruamel.yaml as yaml
import time
def first_moment(data):
"""`first_moment` feature function
:param data: data to evaluate feature function on
:return: returns data as is
"""
return data
def second_moment(data):
"""`second_moment` feature function
:param data: data to evaluate feature function on
:return: squared data
"""
return data**2
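# Example (illustrative; not in the original source): for
# data = np.array([1., 2.]), first_moment(data) -> [1., 2.] and
# second_moment(data) -> [1., 4.]; the model below concatenates such
# feature values into a single feature vector.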
class MaximumEntropyModel(object):
"""Maximum entropyGenerative model based as discussed in https://arxiv.org/abs/1803.08823"""
def __init__(self, features=(first_moment, second_moment), initial_weights=None, data=None, l1=0., l2=0., file=None):
"""Construct MaximumEntropyModel instance
:param features: callable or list of callable representing features or list of features (will be concatenated)
:param initial_weights: initial values for weights array, needs to be of same shape as concatenated features
:param data: data to fit
:param l1: l1 regularization strength, defaults to 0.
:param l2: l2 regularization strength, defaults to 0.
:param file: output file path for fit history (weights and losses, can be used to continue fit)
"""
self._features = features
self._weights = np.asarray(initial_weights) if initial_weights is not None else None
self._l1 = l1
self._l2 = l2
self._data = data
        self._positive_phase = None  # <features_i>_data
        self._negative_phase = None  # <features_i>_model
self._batch_size = None
self._data_batch = None
self._model_batch = None
self._max_steps = None
self._step = 0
self._file = file
self.history = self.load(file)
def features(self, data):
""" return features evaluated on data
:param data: samples of data (2d array)
:return: concatenated values of feature functions, evaluated on sample data
"""
        data = np.asarray(data)
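        # Assumed completion (the source record is truncated here): apply a
        # single feature callable directly; otherwise concatenate the values
        # of all feature functions, as the __init__ docstring describes.
        if callable(self._features):
            return self._features(data)
        return np.concatenate([f(data) for f in self._features], axis=-1)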
# 14 July 2018 <NAME>
# Python bootcamp, lesson 40: Image processing practice with Python
# Import numerical modules
import numpy as np
import scipy.optimize
# Import modules for plotting
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Modules for image processing
import skimage.io
import skimage.morphology
import skimage.segmentation
import skimage.measure
# Modules for interacting with our file system
import os
import glob
##### Problem 5.2: Filter, extract, rinse, repeat
# We first copy in the image segmentation functions from the previous
# image processing practice session
def cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase',
area_bounds=(0,1e7), ecc_bounds=(0, 1)):
"""
This function segments a given image via thresholding and returns a labeled
segmentation mask.
Parameters
----------
im : 2d-array
Image to be segmented. This may be of either float or
integer data type.
thresh : int, float, or 'otsu'
Value used during thresholding operation. This can either be a value
(`int` or `float`) or 'otsu'. If 'otsu', the threshold value will be
determined automatically using Otsu's thresholding method.
radius : float
Radius for gaussian blur for background subtraction. Default
value is 20.
image_mode : 'phase' or 'fluorescence'
Mode of microscopy used to capture the image. If 'phase', objects
with intensity values *lower* than the provided threshold will be
selected. If `fluorescence`, values *greater* than the provided
threshold will be selected. Default value is 'phase'.
area_bounds : tuple of ints.
Range of areas of acceptable objects. This should be provided in units
of square pixels.
ecc_bounds : tuple of floats
Range of eccentricity values of acceptable objects. These values should
range between 0.0 and 1.0.
Returns
-------
im_labeled : 2d-array, int
Labeled segmentation mask.
"""
# Apply a median filter to remove hot pixels.
med_selem = skimage.morphology.square(3)
im_filt = skimage.filters.median(im, selem=med_selem)
# Perform gaussian subtraction
im_sub = bg_subtract(im_filt, radius)
    # Determine the thresholding method.
    if thresh == 'otsu':
        thresh = skimage.filters.threshold_otsu(im_sub)
    # Determine the image mode and apply threshold.
    if image_mode == 'phase':
        im_thresh = im_sub < thresh
    elif image_mode == 'fluorescence':
        im_thresh = im_sub > thresh
    else:
        raise ValueError("image mode not recognized. Must be 'phase' "
                         + "or 'fluorescence'")
# Label the objects.
im_label = skimage.measure.label(im_thresh)
# Apply the area and eccentricity bounds.
im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)
# Remove objects touching the border.
im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)
# Relabel the image.
im_border = im_border > 0
im_label = skimage.measure.label(im_border)
return im_label
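# A minimal usage sketch (the file name below is hypothetical; not part of
# the original solution): segment one phase-contrast image and count objects.
#
#   im = skimage.io.imread('data/bacteria_phase.tif')   # hypothetical path
#   im_label = cell_segmenter(im, thresh='otsu', radius=20.0,
#                             image_mode='phase',
#                             area_bounds=(100, 5000), ecc_bounds=(0, 0.8))
#   n_cells = im_label.max()   # labels run 1..n, so the max is the count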
def bg_subtract(im, radius):
"""
Subtracts a gaussian blurred image from itself smoothing uneven
illumination.
Parameters
----------
im : 2d-array
Image to be subtracted
radius : int or float
Radius of gaussian blur
Returns
-------
im_sub : 2d-array, float
Background subtracted image.
"""
# Apply the gaussian filter.
im_filt = skimage.filters.gaussian(im, radius)
# Ensure the original image is a float
    if np.max(im) > 1.0:
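        # Assumed completion -- the source record is truncated at this point.
        # Rescale integer images into the [0, 1] float range.
        im = skimage.img_as_float(im)
    # Subtract the blurred background to correct uneven illumination
    # (assumed completion, consistent with the docstring above).
    return im - im_filt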
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest gained self.subTest() only in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
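# Illustrative usage of the augmenter tested above (not part of the test
# suite): iaa.Add samples one offset per image (or per channel) and adds it.
#
#   aug = iaa.Add((-20, 20), per_channel=0.5)
#   images = np.full((4, 16, 16, 3), 128, dtype=np.uint8)
#   images_aug = aug(images=images)   # each image shifted by a sampled offset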
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
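# Illustrative contrast with iaa.Add (not part of the test suite):
# AddElementwise samples an independent offset per pixel, so values vary
# within one image -- see test_samples_change_by_spatial_location above.
#
#   aug = iaa.AddElementwise((-20, 20))
#   image_aug = aug(image=np.full((16, 16, 3), 128, dtype=np.uint8))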
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
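# Illustrative usage (not part of the test suite): zero-centered gaussian
# noise with a standard deviation of 10% of the uint8 value range.
#
#   aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.1 * 255)
#   image_aug = aug(image=np.full((16, 16, 1), 128, dtype=np.uint8))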
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
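# Illustrative usage (not part of the test suite): drop ~10% of all pixels,
# sampling the dropout mask per channel for roughly half of the images.
#
#   aug = iaa.Dropout(p=0.1, per_channel=0.5)
#   image_aug = aug(image=np.full((16, 16, 3), 255, dtype=np.uint8))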
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
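# Illustrative usage (not part of the test suite): coarse dropout zeroes
# rectangular regions by sampling the mask at low resolution (size_percent)
# and upscaling it to the image size.
#
#   aug = iaa.CoarseDropout(p=0.1, size_percent=0.05)
#   image_aug = aug(image=np.full((64, 64, 3), 255, dtype=np.uint8))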
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
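        # note: Dropout2d stores the probability of *keeping* a channel
        # internally, so drop probability p=0.7 shows up as Binomial(0.3)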
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
                    images = np.full((5, 1, 1, 3), value, dtype=dt)
                    aug = iaa.TotalDropout(p=p)
                    images_aug = aug(images=images)
                    assert images_aug.shape == images.shape
                    assert images_aug.dtype.name == dt
                    if np.isclose(p, 1.0) or np.isclose(value, 0.0):
                        assert np.sum(_isclose(images_aug, 0.0)) == 5*3
                    else:
                        assert (
                            np.sum(_isclose(images_aug, np.float128(value)))
                            == 5*3)
# -*- coding: utf-8 -*-
"""
Source: https://www.solcore.solar/
This is mostly based on <NAME>' tmm package, with modifications to vectorize the code
over wavelengths (by <NAME>), and to include depth-dependent absorption calculations
in incoherent layers using the Beer-Lambert law (by <NAME>).
All credit for the algorithm, testing, etc. goes to <NAME>. For more details, visit:
- https://arxiv.org/abs/1603.02720
- http://sjbyrnes.com
- For more information see the docstring of each function, and also see manual.pdf
The two most important functions are:
coh_tmm(...) -- the transfer-matrix-method calculation in the coherent
    case (i.e. thin films)
inc_tmm(...) -- the transfer-matrix-method calculation in the incoherent
    case (i.e. films tens or hundreds of wavelengths thick, or whose
    thickness is not very uniform).
These functions are all imported into the main package (tmm) namespace,
so you can call them with tmm.coh_tmm(...) etc.
"""
import scipy as sp
import numpy as np
import sys
EPSILON = sys.float_info.epsilon # typical floating-point calculation error
def make_2x2_array(a, b, c, d, dtype=float):
"""
    Makes a stack of 2x2 numpy arrays [[a,b],[c,d]], one per entry of the
    equal-length input arrays (e.g. one per wavelength). Vectorized version
    of "numpy.array([[a,b],[c,d]], dtype=float)", but much faster
"""
my_array = np.empty((len(a), 2, 2), dtype=dtype)
my_array[:, 0, 0] = a
my_array[:, 0, 1] = b
my_array[:, 1, 0] = c
my_array[:, 1, 1] = d
return my_array
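# Illustrative usage (assumed values): one 2x2 matrix per wavelength, e.g.
# make_2x2_array(np.ones(3), np.zeros(3), np.zeros(3), np.ones(3)) has
# shape (3, 2, 2).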
def snell(n_1, n_2, th_1):
"""
return angle theta in layer 2 with refractive index n_2, assuming
it has angle th_1 in layer with refractive index n_1. Use Snell's law. Note
that "angles" may be complex!!
"""
# Important that the arcsin here is scipy.arcsin, not numpy.arcsin!! (They
# give different results e.g. for arcsin(2).)
# Use real_if_close because e.g. arcsin(2 + 1e-17j) is very different from
# arcsin(2) due to branch cut
return sp.arcsin(np.real_if_close(n_1 * np.sin(th_1) / n_2))
def list_snell(n_list, th_0):
"""
return list of angle theta in each layer based on angle th_0 in layer 0,
using Snell's law. n_list is index of refraction of each layer. Note that
"angles" may be complex!!
"""
# Important that the arcsin here is scipy.arcsin, not numpy.arcsin!! (They
# give different results e.g. for arcsin(2).)
# Use real_if_close because e.g. arcsin(2 + 1e-17j) is very different from
# arcsin(2) due to branch cut
return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))
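# Illustrative usage (assumed values): at normal incidence (th_0 = 0) every
# layer angle is zero, e.g. list_snell(np.array([1.0, 1.5, 3.0]), 0.0)
# returns an array of zeros; obliquely, entry i is arcsin(n_0*sin(th_0)/n_i).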
def interface_r(polarization, n_i, n_f, th_i, th_f):
"""
reflection amplitude (from Fresnel equations)
polarization is either "s" or "p" for polarization
n_i, n_f are (complex) refractive index for incident and final
th_i, th_f are (complex) propegation angle for incident and final
(in radians, where 0=normal). "th" stands for "theta".
"""
if polarization == 's':
# return 2 * n_i * np.cos(th_i) / (n_i * np.cos(th_i) + n_f * np.cos(th_f))
return ((n_i * np.cos(th_i) - n_f * np.cos(th_f)) /
(n_i * np.cos(th_i) + n_f * np.cos(th_f)))
elif polarization == 'p':
return ((n_f * np.cos(th_i) - n_i * np.cos(th_f)) /
                (n_f * np.cos(th_i) + n_i * np.cos(th_f)))
    else:
        raise ValueError("Polarization must be 's' or 'p'")
import time
import os
import numpy as np
import scipy
from npMatrix3d import *
from npMatrix2d import *
# ============================================================================
#
# This file contains code for all Fisher Scoring based (univariate/"one-
# model") parameter estimation methods developed during the course of the LMM
# project. The methods given here are:
#
# - `FS`: Fisher Scoring
# - `fFS`: Full-Fisher Scoring
# - `SFS`: Simplified Fisher Scoring
# - `fSFS`: Full-Simplified Fisher Scoring
# - `cSFS`: Cholesky Simplified Fisher
#
# ----------------------------------------------------------------------------
#
# Author: <NAME> (Last edited 07/04/2020)
#
# ============================================================================
# ============================================================================
#
# This below function performs Cholesky Fisher Scoring for the Linear Mixed
# Model. It is based on the update rules:
#
# beta = (X'V^(-1)X)^(-1)(X'V^(-1)Y)
#
# sigma2 = e'V^(-1)e/n
#
# for k in {1,...,r};
# vechTri(Chol_k) = \theta_f + lam*I(vechTri(Chol_k))^(-1) (dl/dvechTri(Chol_k))
#
# Where:
# - chol_k is the lower triangular cholesky factor of D_k
# - vechTri(A) is the vector of lower triangular elements of A.
# - lam is a scalar stepsize.
# - I(vechTri(Chol_k)) is the Fisher Information matrix of vechTri(Chol_k).
# - dl/dvechTri(Chol_k) is the derivative of the log likelihood of
# (beta, sigma^2, vechTri(Chol_1),...vechTri(Chol_r)) with respect
# to vechTri(Chol_k).
# - e is the residual vector (e=Y-X\beta)
# - V is the matrix (I+ZDZ').
#
# ----------------------------------------------------------------------------
#
# This function takes as input;
#
# ----------------------------------------------------------------------------
#
# - `XtX`: X transpose multiplied by X.
# - `XtY`: X transpose multiplied by Y.
# - `XtZ`: X transpose multiplied by Z.
# - `YtX`: Y transpose multiplied by X.
# - `YtY`: Y transpose multiplied by Y.
# - `YtZ`: Y transpose multiplied by Z.
# - `ZtX`: Z transpose multiplied by X.
# - `ZtY`: Z transpose multiplied by Y.
# - `ZtZ`: Z transpose multiplied by Z.
# - `nlevels`: A vector containing the number of levels for each factor,
# e.g. `nlevels=[3,4]` would mean the first factor has 3 levels
# and the second factor has 4 levels.
# - `nraneffs`: A vector containing the number of random effects for each
# factor, e.g. `nraneffs=[2,1]` would mean the first factor has
#                2 random effects and the second factor has 1 random effect.
# - `tol`: A scalar tolerance value. Iteration stops once successive
# log-likelihood values no longer exceed `tol`.
# - `n`: The number of observations.
# - `init_paramVector`: (Optional) initial estimates of the parameter vector.
#
# ----------------------------------------------------------------------------
#
# And returns:
#
# ----------------------------------------------------------------------------
#
# - `paramVector`: The parameter vector (beta,sigma2,vech(D_1),...,vech(D_r))
# - `bvals`: Estimates of the random effects vector, b.
# - `nit`: The number of iterations taken to converge.
#
# ============================================================================
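# ============================================================================
#
# Illustrative sketch: every method in this file follows the same damped
# Fisher scoring template. `theta`, `score` and `fishInfo` are hypothetical
# stand-ins for the parameter vector, dl/dtheta and I(theta); this helper is
# not used by the estimation functions below.
#
# ============================================================================
def _fisher_scoring_step_sketch(theta, score, fishInfo, lam=1.0):
    # theta_new = theta + lam * I(theta)^+ (dl/dtheta); the pseudo-inverse
    # guards against a rank-deficient information matrix.
    return theta + lam * (np.linalg.pinv(fishInfo) @ score)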
def cSFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=False, init_paramVector=None):
# ------------------------------------------------------------------------------
# Useful scalars
# ------------------------------------------------------------------------------
# Number of factors, r
r = len(nlevels)
# Number of random effects, q
q = np.sum(np.dot(nraneffs,nlevels))
# Number of fixed effects, p
p = XtX.shape[0]
# ------------------------------------------------------------------------------
# Index variables
# ------------------------------------------------------------------------------
# Work out the total number of parameters
tnp = np.int32(p + 1 + np.sum(nraneffs*(nraneffs+1)/2))
# Indices for submatrics corresponding to Dks
FishIndsDk = np.int32(np.cumsum(nraneffs*(nraneffs+1)/2) + p + 1)
FishIndsDk = np.insert(FishIndsDk,0,p+1)
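    # e.g. with p=4 fixed effects and nraneffs=[2,1] (assumed values) this
    # gives FishIndsDk=[5,8,9]: sigma2 sits at index 4, vech(D_1) occupies
    # entries 5:8 (2*(2+1)/2 = 3 of them) and vech(D_2) occupies entry 8.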
# ------------------------------------------------------------------------------
# Work out D indices (there is one block of D per level)
# ------------------------------------------------------------------------------
Dinds = np.zeros(np.sum(nlevels)+1)
counter = 0
# Loop through and add each index
for k in np.arange(len(nraneffs)):
for j in np.arange(nlevels[k]):
Dinds[counter] = np.concatenate((np.array([0]), np.cumsum(nlevels*nraneffs)))[k] + nraneffs[k]*j
counter = counter + 1
# Last index will be missing so add it
Dinds[len(Dinds)-1]=Dinds[len(Dinds)-2]+nraneffs[-1]
# Make sure indices are ints
Dinds = np.int64(Dinds)
# ------------------------------------------------------------------------------
# Duplication, Commutation and Elimination matrices
# ------------------------------------------------------------------------------
dupMatTdict = dict()
elimMatdict = dict()
comMatdict = dict()
for i in np.arange(len(nraneffs)):
dupMatTdict[i] = dupMat2D(nraneffs[i]).transpose()
comMatdict[i] = comMat2D(nraneffs[i],nraneffs[i])
elimMatdict[i] = elimMat2D(nraneffs[i])
# ------------------------------------------------------------------------------
# Initial estimates
# ------------------------------------------------------------------------------
# If they have been specified as inputs use those.
if init_paramVector is not None:
# beta and sigma2 initial values
beta = init_paramVector[0:p]
sigma2 = init_paramVector[p:(p+1)][0,0]
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Initial cholesky decomposition and D.
Ddict = dict()
cholDict = dict()
for k in np.arange(len(nraneffs)):
            # Read D_k off the supplied parameter vector
            Ddict[k] = makeDpd2D(vech2mat2D(init_paramVector[FishIndsDk[k]:FishIndsDk[k+1]]))
cholDict[k] = np.linalg.cholesky(Ddict[k])
# Matrix version
D = scipy.sparse.lil_matrix((q,q))
counter = 0
for k in np.arange(len(nraneffs)):
for j in np.arange(nlevels[k]):
D[Dinds[counter]:Dinds[counter+1], Dinds[counter]:Dinds[counter+1]] = Ddict[k]
counter = counter + 1
D = D.toarray()
# Work out e'e
ete = ssr2D(YtX, YtY, XtX, beta)
# Z'e, needed for first iteration
Zte = ZtY - (ZtX @ beta)
# Otherwise use the closed form initial estimates
else:
# Inital beta
beta = initBeta2D(XtX, XtY)
# Work out e'e
ete = ssr2D(YtX, YtY, XtX, beta)
# Initial sigma2
sigma2 = initSigma22D(ete, n)
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Z'e
Zte = ZtY - (ZtX @ beta)
# Inital cholesky decomposition and D
Ddict = dict()
cholDict = dict()
for k in np.arange(len(nraneffs)):
# We just initialize to identity for cholesky.
cholDict[k] = np.eye(nraneffs[k])
Ddict[k] = np.eye(nraneffs[k])
# Matrix version
D = scipy.sparse.lil_matrix((q,q))
counter = 0
for k in np.arange(len(nraneffs)):
for j in np.arange(nlevels[k]):
D[Dinds[counter]:Dinds[counter+1], Dinds[counter]:Dinds[counter+1]] = Ddict[k]
counter = counter + 1
D = D.toarray()
# ------------------------------------------------------------------------------
# Obtain D(I+Z'ZD)^(-1)
# ------------------------------------------------------------------------------
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# ------------------------------------------------------------------------------
# Initial lambda and likelihoods
# ------------------------------------------------------------------------------
# Step size lambda
lam = 1
# Initial log likelihoods
llhprev = np.inf
llhcurr = -np.inf
# ------------------------------------------------------------------------------
# Dicts to save repeated computation.
# ------------------------------------------------------------------------------
# This will hold the matrices: Sum_j^{l_k} Z_{i,j}'Z_{i,j}
ZtZmatdict = dict()
for k in np.arange(len(nraneffs)):
ZtZmatdict[k] = None
# This will hold the permutations needed for the covariance between the
# derivatives with respect to k
permdict = dict()
for k in np.arange(len(nraneffs)):
permdict[str(k)] = None
# ------------------------------------------------------------------------------
# Iteration
# ------------------------------------------------------------------------------
# Number of iterations
nit = 0
while np.abs(llhprev-llhcurr)>tol:
# Change current likelihood to previous
llhprev = llhcurr
# Update number of iterations
nit = nit+1
#---------------------------------------------------------------------------
# Update beta
#---------------------------------------------------------------------------
beta = np.linalg.solve(XtX - XtZ @ DinvIplusZtZD @ ZtX, XtY - XtZ @ DinvIplusZtZD @ ZtY)
#---------------------------------------------------------------------------
# Update sigma2
#---------------------------------------------------------------------------
if reml==False:
sigma2 = 1/n*(ete - Zte.transpose() @ DinvIplusZtZD @ Zte)
else:
sigma2 = 1/(n-p)*(ete - Zte.transpose() @ DinvIplusZtZD @ Zte)
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
#---------------------------------------------------------------------------
# Update Cholesky factor
#---------------------------------------------------------------------------
counter = 0
# Loop though unique blocks of D updating one at a time
for k in np.arange(len(nraneffs)):
#-----------------------------------------------------------------------
# Calculate derivative with respect to D_k
#-----------------------------------------------------------------------
# Work out derivative
if ZtZmatdict[k] is None:
dldD,ZtZmatdict[k] = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD, None, reml, ZtX, XtX)
else:
dldD,_ = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD,ZtZmatdict[k], reml, ZtX, XtX)
#-----------------------------------------------------------------------
# Calculate covariance of derivative with respect to D_k
#-----------------------------------------------------------------------
if permdict[str(k)] is None:
covdldDk,permdict[str(k)] = get_covdldDk1Dk22D(k, k, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, perm=None)
else:
covdldDk,_ = get_covdldDk1Dk22D(k, k, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, perm=permdict[str(k)])
#-----------------------------------------------------------------------
# Transform to derivative with respect to chol_k
#-----------------------------------------------------------------------
# We need to modify by multiplying by this matrix to obtain the cholesky derivative.
chol_mod = elimMatdict[k] @ scipy.sparse.kron(cholDict[k],np.eye(nraneffs[k])).transpose() @ (scipy.sparse.identity(nraneffs[k]**2) + comMatdict[k]) @ dupMatTdict[k].transpose()
# Transform to cholesky
dldcholk = chol_mod @ dupMatTdict[k] @ mat2vec2D(dldD)
#-----------------------------------------------------------------------
# Transform to covariance of derivative with respect to chol_k
#-----------------------------------------------------------------------
covdldcholk = chol_mod @ covdldDk @ chol_mod.transpose()
#-----------------------------------------------------------------------
# Perform update
#-----------------------------------------------------------------------
update = lam*np.linalg.pinv(forceSym2D(covdldcholk)) @ dldcholk #lam*np.linalg.solve(forceSym2D(covdldcholk), dldcholk)
#-----------------------------------------------------------------------
# Update D_k and chol_k
#-----------------------------------------------------------------------
cholDict[k] = vechTri2mat2D(mat2vechTri2D(cholDict[k]) + update)
Ddict[k] = cholDict[k] @ cholDict[k].transpose()
# Add D_k back into D and recompute DinvIplusZtZD
for j in np.arange(nlevels[k]):
D[Dinds[counter]:Dinds[counter+1], Dinds[counter]:Dinds[counter+1]] = Ddict[k]
counter = counter + 1
#-----------------------------------------------------------------------
# Obtain D(I+Z'ZD)^(-1)
#-----------------------------------------------------------------------
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# --------------------------------------------------------------------------
# Matrices for next iteration
# --------------------------------------------------------------------------
# Recalculate Zte and ete
Zte = ZtY - (ZtX @ beta)
# Sum of squared residuals
ete = ssr2D(YtX, YtY, XtX, beta)
# Check sigma2 hasn't hit a boundary
if sigma2<0:
sigma2=1e-10
#---------------------------------------------------------------------------
# Update the step size and log likelihood
#---------------------------------------------------------------------------
llhcurr = llh2D(n, ZtZ, Zte, ete, sigma2, DinvIplusZtZD,D,reml,XtX,XtZ,ZtX)[0,0]
if llhprev>llhcurr:
lam = lam/2
#-------------------------------------------------------------------------------
# Save parameter vector
#-------------------------------------------------------------------------------
paramVector = np.concatenate((beta, sigma2))
for k in np.arange(len(nraneffs)):
paramVector = np.concatenate((paramVector, mat2vech2D(Ddict[k])))
#-------------------------------------------------------------------------------
# Work out b values
#-------------------------------------------------------------------------------
bvals = DinvIplusZtZD @ Zte
return(paramVector, bvals, nit, llhcurr)
# ============================================================================
#
# This below function performs Fisher Scoring for the Linear Mixed Model. It
# is based on the update rule:
#
# \theta_h = \theta_h + lam*I(\theta_h)^(-1) (dl/d\theta_h)
#
# Where \theta_h is the vector (beta, sigma2, vech(D1),...vech(Dr)), lam is a
# scalar stepsize, I(\theta_h) is the Fisher Information matrix of \theta_h
# and dl/d\theta_h is the derivative of the log likelihood of \theta_h with
# respect to \theta_h.
#
# ----------------------------------------------------------------------------
#
# This function takes as input;
#
# ----------------------------------------------------------------------------
#
# - `XtX`: X transpose multiplied by X.
# - `XtY`: X transpose multiplied by Y.
# - `XtZ`: X transpose multiplied by Z.
# - `YtX`: Y transpose multiplied by X.
# - `YtY`: Y transpose multiplied by Y.
# - `YtZ`: Y transpose multiplied by Z.
# - `ZtX`: Z transpose multiplied by X.
# - `ZtY`: Z transpose multiplied by Y.
# - `ZtZ`: Z transpose multiplied by Z.
# - `nlevels`: A vector containing the number of levels for each factor,
# e.g. `nlevels=[3,4]` would mean the first factor has 3 levels
# and the second factor has 4 levels.
# - `nraneffs`: A vector containing the number of random effects for each
# factor, e.g. `nraneffs=[2,1]` would mean the first factor has
#                2 random effects and the second factor has 1 random effect.
# - `tol`: A scalar tolerance value. Iteration stops once successive
# log-likelihood values no longer exceed `tol`.
# - `n`: The number of observations.
# - `init_paramVector`: (Optional) initial estimates of the parameter vector.
#
# ----------------------------------------------------------------------------
#
# And returns:
#
# ----------------------------------------------------------------------------
#
# - `paramVector`: The parameter vector (beta,sigma2,vech(D_1),...,vech(D_r))
# - `bvals`: Estimates of the random effects vector, b.
# - `nit`: The number of iterations taken to converge.
#
# ============================================================================
def FS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=False, init_paramVector=None):
# ------------------------------------------------------------------------------
# Useful scalars
# ------------------------------------------------------------------------------
# Number of factors, r
r = len(nlevels)
# Number of random effects, q
q = np.sum(np.dot(nraneffs,nlevels))
# Number of fixed effects, p
p = XtX.shape[0]
# ------------------------------------------------------------------------------
# Index variables
# ------------------------------------------------------------------------------
# Work out the total number of parameters
tnp = np.int32(p + 1 + np.sum(nraneffs*(nraneffs+1)/2))
# Indices for submatrics corresponding to Dks
FishIndsDk = np.int32(np.cumsum(nraneffs*(nraneffs+1)/2) + p + 1)
FishIndsDk = np.insert(FishIndsDk,0,p+1)
# ------------------------------------------------------------------------------
# Duplication matrices
# ------------------------------------------------------------------------------
dupMatTdict = dict()
for i in np.arange(len(nraneffs)):
dupMatTdict[i] = dupMat2D(nraneffs[i]).transpose()
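    # e.g. for a 2x2 symmetric D_k (assumed), vech stacks the lower triangle
    # [d11, d21, d22]; the duplication matrix satisfies
    # vec(D_k) = Dup_k @ vech(D_k), and its transpose maps vec-space
    # derivatives back to the vech-space parameters in theta_h.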
# ------------------------------------------------------------------------------
# Initial estimates
# ------------------------------------------------------------------------------
if init_paramVector is not None:
# Initial beta and sigma2
beta = init_paramVector[0:p]
sigma2 = init_paramVector[p:(p+1)][0,0]
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Initial D (dictionary version)
Ddict = dict()
for k in np.arange(len(nraneffs)):
Ddict[k] = makeDpd2D(vech2mat2D(init_paramVector[FishIndsDk[k]:FishIndsDk[k+1]]))
# Initial D (matrix version)
for i in np.arange(len(nraneffs)):
for j in np.arange(nlevels[i]):
# Add block
if i == 0 and j == 0:
D = Ddict[i]
else:
D = scipy.linalg.block_diag(D, Ddict[i])
        # Work out e'e
        ete = ssr2D(YtX, YtY, XtX, beta)
        # Z'e, needed for first iteration
        Zte = ZtY - (ZtX @ beta)
# If we don't have initial values estimate them
else:
# Inital beta
beta = initBeta2D(XtX, XtY)
# Work out e'e
ete = ssr2D(YtX, YtY, XtX, beta)
# Initial sigma2
sigma2 = initSigma22D(ete, n)
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Z'e, needed for first iteration
Zte = ZtY - (ZtX @ beta)
# Inital D (Dictionary version)
Ddict = dict()
for k in np.arange(len(nraneffs)):
Ddict[k] = makeDpd2D(initDk2D(k, ZtZ, Zte, sigma2, nlevels, nraneffs, dupMatTdict))
# Matrix version
D = np.array([])
for i in np.arange(len(nraneffs)):
for j in np.arange(nlevels[i]):
# Add block
if i == 0 and j == 0:
D = Ddict[i]
else:
D = scipy.linalg.block_diag(D, Ddict[i])
# ------------------------------------------------------------------------------
# Initial lambda and likelihoods
# ------------------------------------------------------------------------------
# Step size lambda
lam = 1
# Initial log likelihoods
llhprev = np.inf
llhcurr = -np.inf
# ------------------------------------------------------------------------------
# Dicts to save repeated computation.
# ------------------------------------------------------------------------------
# This will hold the matrices: Sum_j^{l_k} Z_{i,j}'Z_{i,j}
ZtZmatdict = dict()
for k in np.arange(len(nraneffs)):
ZtZmatdict[k] = None
# This will hold the permutations needed for the covariance between the
# derivatives with respect to k1 and k2
permdict = dict()
for k1 in np.arange(len(nraneffs)):
for k2 in np.arange(len(nraneffs)):
permdict[str(k1)+str(k2)] = None
# ------------------------------------------------------------------------------
# Iteration
# ------------------------------------------------------------------------------
# Number of iterations
nit = 0
while np.abs(llhprev-llhcurr)>tol:
# Update nit
nit = nit+1
# Change current likelihood to previous
llhprev = llhcurr
# --------------------------------------------------------------------------
# Matrices needed later
# --------------------------------------------------------------------------
# X transpose e and Z transpose e
Xte = XtY - (XtX @ beta)
# --------------------------------------------------------------------------
# Obtain D(I+Z'ZD)^(-1)
# --------------------------------------------------------------------------
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# --------------------------------------------------------------------------
# Derivatives
# --------------------------------------------------------------------------
# Derivative wrt beta
dldB = get_dldB2D(sigma2, Xte, XtZ, DinvIplusZtZD, Zte)
# Derivative wrt sigma^2
dldsigma2 = get_dldsigma22D(n, ete, Zte, sigma2, DinvIplusZtZD, reml, p)
# For each factor, factor k, work out dl/dD_k
dldDdict = dict()
for k in np.arange(len(nraneffs)):
            # Store it in the dictionary
if ZtZmatdict[k] is None:
dldDdict[k],ZtZmatdict[k] = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD,None, reml, ZtX, XtX)
else:
dldDdict[k],_ = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD,ZtZmatdict[k], reml, ZtX, XtX)
# --------------------------------------------------------------------------
# Covariance of dl/dsigma2
# --------------------------------------------------------------------------
covdldsigma2 = n/(2*(sigma2**2))
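        # (this is the expected information E[-d^2 l/d(sigma2)^2] under the
        # marginal model Y ~ N(X*beta, sigma2*V) with V = I + Z*D*Z')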
# --------------------------------------------------------------------------
# Construct the Fisher Information matrix
# --------------------------------------------------------------------------
FisherInfoMat = np.zeros((tnp,tnp))
# Add dl/dbeta covariance
FisherInfoMat[np.ix_(np.arange(p),np.arange(p))] = get_covdldbeta2D(XtZ, XtX, ZtZ, DinvIplusZtZD, sigma2)
# Add dl/dsigma2 covariance
FisherInfoMat[p,p] = covdldsigma2
# Add dl/dsigma2 dl/dD covariance
for k in np.arange(len(nraneffs)):
            # Compute covariance between dl/dsigma2 and dl/dD_k
if ZtZmatdict[k] is None:
covdldDksigma2,ZtZmatdict[k] = get_covdldDkdsigma22D(k, sigma2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, ZtZmat=None)
else:
covdldDksigma2,_ = get_covdldDkdsigma22D(k, sigma2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, ZtZmat=ZtZmatdict[k])
# Assign to the relevant block
FisherInfoMat[p, FishIndsDk[k]:FishIndsDk[k+1]] = covdldDksigma2.reshape(FishIndsDk[k+1]-FishIndsDk[k])
FisherInfoMat[FishIndsDk[k]:FishIndsDk[k+1],p] = FisherInfoMat[p, FishIndsDk[k]:FishIndsDk[k+1]].transpose()
# Add dl/dD covariance for each pair (k1,k2) of random factors
for k1 in np.arange(len(nraneffs)):
for k2 in np.arange(k1+1):
# Work out the indices of random factor k1 and random factor k2
IndsDk1 = np.arange(FishIndsDk[k1],FishIndsDk[k1+1])
IndsDk2 = np.arange(FishIndsDk[k2],FishIndsDk[k2+1])
# Get covariance between D_k1 and D_k2
if permdict[str(k1)+str(k2)] is None:
FisherInfoMat[np.ix_(IndsDk1, IndsDk2)],permdict[str(k1)+str(k2)] = get_covdldDk1Dk22D(k1, k2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict,perm=None)
else:
FisherInfoMat[np.ix_(IndsDk1, IndsDk2)],_ = get_covdldDk1Dk22D(k1, k2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict,perm=permdict[str(k1)+str(k2)])
                # Fill in the symmetric block
FisherInfoMat[np.ix_(IndsDk2, IndsDk1)] = FisherInfoMat[np.ix_(IndsDk1, IndsDk2)].transpose()
# Check Fisher Information matrix is symmetric
FisherInfoMat = forceSym2D(FisherInfoMat)
# ----------------------------------------------------------------------
        # Concatenate parameters and derivatives together
# ----------------------------------------------------------------------
paramVector = np.concatenate((beta, np.array([[sigma2]])))
derivVector = np.concatenate((dldB, dldsigma2))
for k in np.arange(len(nraneffs)):
paramVector = np.concatenate((paramVector, mat2vech2D(Ddict[k])))
derivVector = np.concatenate((derivVector, dupMatTdict[k] @ mat2vec2D(dldDdict[k])))
# ----------------------------------------------------------------------
# Update step
# ----------------------------------------------------------------------
paramVector = paramVector + lam*(np.linalg.pinv(FisherInfoMat) @ derivVector)#lam*(np.linalg.solve(FisherInfoMat,derivVector))
# ----------------------------------------------------------------------
# Get the new parameters
# ----------------------------------------------------------------------
beta = paramVector[0:p]
sigma2 = paramVector[p:(p+1)][0,0]
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# D (dict version)
for k in np.arange(len(nraneffs)):
Ddict[k] = makeDpd2D(vech2mat2D(paramVector[FishIndsDk[k]:FishIndsDk[k+1]]))
# D (matrix version)
for i in np.arange(len(nraneffs)):
for j in np.arange(nlevels[i]):
if i == 0 and j == 0:
D = Ddict[i]
else:
D = scipy.linalg.block_diag(D, Ddict[i])
# --------------------------------------------------------------------------
# Matrices for next iteration
# --------------------------------------------------------------------------
# Recalculate Zte and ete
Zte = ZtY - (ZtX @ beta)
# Sum of squared residuals
ete = ssr2D(YtX, YtY, XtX, beta)
# Inverse of (I+Z'ZD) multiplied by D
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# Check sigma2 hasn't hit a boundary
if sigma2<0:
sigma2=1e-10
# ----------------------------------------------------------------------
# Update the step size and log likelihood
# ----------------------------------------------------------------------
llhcurr = llh2D(n, ZtZ, Zte, ete, sigma2, DinvIplusZtZD,D,reml,XtX,XtZ,ZtX)[0,0]
if llhprev>llhcurr:
lam = lam/2
# --------------------------------------------------------------------------
# Get b values
# --------------------------------------------------------------------------
bvals = DinvIplusZtZD @ Zte
return(paramVector, bvals, nit, llhcurr)
# ============================================================================
#
# This below function performs full Fisher Scoring for the Linear Mixed
# Model. It is based on the update rule:
#
# \theta_f = \theta_f + lam*I(\theta_f)^+ (dl/d\theta_f)
#
# Where \theta_f is the vector (beta, sigma2, vec(D1),...vec(Dr)), lam is a
# scalar stepsize, I(\theta_f) is the Fisher Information matrix of \theta_f
# and dl/d\theta_f is the derivative of the log likelihood of \theta_f with
# respect to \theta_f.
#
# Note that, as \theta_f is written in terms of 'vec', rather than 'vech',
# (full vector, 'f', rather than half-vector, 'h'), the information matrix
# will have repeated rows (due to \theta_f having repeated entries).
#
# ----------------------------------------------------------------------------
#
# This function takes as input;
#
# ----------------------------------------------------------------------------
#
# - `XtX`: X transpose multiplied by X.
# - `XtY`: X transpose multiplied by Y.
# - `XtZ`: X transpose multiplied by Z.
# - `YtX`: Y transpose multiplied by X.
# - `YtY`: Y transpose multiplied by Y.
# - `YtZ`: Y transpose multiplied by Z.
# - `ZtX`: Z transpose multiplied by X.
# - `ZtY`: Z transpose multiplied by Y.
# - `ZtZ`: Z transpose multiplied by Z.
# - `nlevels`: A vector containing the number of levels for each factor,
# e.g. `nlevels=[3,4]` would mean the first factor has 3 levels
# and the second factor has 4 levels.
# - `nraneffs`: A vector containing the number of random effects for each
# factor, e.g. `nraneffs=[2,1]` would mean the first factor has
#                2 random effects and the second factor has 1 random effect.
# - `tol`: A scalar tolerance value. Iteration stops once successive
# log-likelihood values no longer exceed `tol`.
# - `n`: The number of observations.
# - `init_paramVector`: (Optional) initial estimates of the parameter vector.
#
# ----------------------------------------------------------------------------
#
# And returns:
#
# ----------------------------------------------------------------------------
#
# - `paramVector`: The parameter vector (beta,sigma2,vech(D_1),...,vech(D_r))
# - `bvals`: Estimates of the random effects vector, b.
# - `nit`: The number of iterations taken to converge.
#
# ============================================================================
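# ----------------------------------------------------------------------------
#
# Note: because vec(D_k) repeats every off-diagonal element of D_k, the
# information matrix I(theta_f) is rank-deficient by construction; this is
# why the update below uses the Moore-Penrose pseudo-inverse
# (np.linalg.pinv) rather than a linear solve.
#
# ----------------------------------------------------------------------------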
def fFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=False, init_paramVector=None):
# ------------------------------------------------------------------------------
# Useful scalars
# ------------------------------------------------------------------------------
# Number of factors, r
r = len(nlevels)
# Number of random effects, q
q = np.sum(np.dot(nraneffs,nlevels))
# Number of fixed effects, p
p = XtX.shape[0]
# ------------------------------------------------------------------------------
# Index variables
# ------------------------------------------------------------------------------
# Work out the total number of parameters
tnp = np.int32(p + 1 + np.sum(nraneffs**2))
# Indices for submatrics corresponding to Dks
FishIndsDk = np.int32(np.cumsum(nraneffs**2) + p + 1)
FishIndsDk = np.insert(FishIndsDk,0,p+1)
# ------------------------------------------------------------------------------
# Duplication matrices
# ------------------------------------------------------------------------------
dupMatTdict = dict()
for i in np.arange(len(nraneffs)):
dupMatTdict[i] = dupMat2D(nraneffs[i]).transpose()
# ------------------------------------------------------------------------------
# Initial estimates
# ------------------------------------------------------------------------------
# Read in initial estimates if we have any
if init_paramVector is not None:
# Initial beta and sigma2
beta = init_paramVector[0:p]
sigma2 = init_paramVector[p:(p+1)][0,0]
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Initial D (dictionary version)
Ddict = dict()
for k in np.arange(len(nraneffs)):
Ddict[k] = makeDpd2D(vec2mat2D(init_paramVector[FishIndsDk[k]:FishIndsDk[k+1]]))
# Initial D (matrix version)
for i in np.arange(len(nraneffs)):
for j in np.arange(nlevels[i]):
if i == 0 and j == 0:
D = Ddict[i]
else:
D = scipy.linalg.block_diag(D, Ddict[i])
# Work out e'e
ete = ssr2D(YtX, YtY, XtX, beta)
# Z'e, needed for first iteration
Zte = ZtY - (ZtX @ beta)
# Estimate initial values otherwise
else:
# Inital beta
beta = initBeta2D(XtX, XtY)
# Work out e'e
ete = ssr2D(YtX, YtY, XtX, beta)
# Initial sigma2
sigma2 = initSigma22D(ete, n)
sigma2 = np.maximum(sigma2,1e-10) # Prevent hitting boundary
# Z'e, needed for first iteration
Zte = ZtY - (ZtX @ beta)
# Inital D (Dictionary version)
Ddict = dict()
for k in np.arange(len(nraneffs)):
Ddict[k] = makeDpd2D(initDk2D(k, ZtZ, Zte, sigma2, nlevels, nraneffs, dupMatTdict))
# Inital D (Matrix version)
D = np.array([])
for i in np.arange(len(nraneffs)):
for j in np.arange(nlevels[i]):
if i == 0 and j == 0:
D = Ddict[i]
else:
D = scipy.linalg.block_diag(D, Ddict[i])
# ------------------------------------------------------------------------------
# Obtain D(I+Z'ZD)^(-1)
# ------------------------------------------------------------------------------
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# ------------------------------------------------------------------------------
# Initial lambda and likelihoods
# ------------------------------------------------------------------------------
# Step size lambda
lam = 1
# Initial log likelihoods
llhprev = np.inf
llhcurr = -np.inf
# ------------------------------------------------------------------------------
# Dicts to save repeated computation.
# ------------------------------------------------------------------------------
# This will hold the matrices: Sum_j^{l_k} Z_{i,j}'Z_{i,j}
ZtZmatdict = dict()
for k in np.arange(len(nraneffs)):
ZtZmatdict[k] = None
# This will hold the permutations needed for the covariance between the
# derivatives with respect to k1 and k2
permdict = dict()
for k1 in np.arange(len(nraneffs)):
for k2 in np.arange(len(nraneffs)):
permdict[str(k1)+str(k2)] = None
# ------------------------------------------------------------------------------
# Iteration
# ------------------------------------------------------------------------------
nit = 0
while np.abs(llhprev-llhcurr)>tol:
# Update number of iterations
nit = nit+1
# Change current likelihood to previous
llhprev = llhcurr
# ------------------------------------------------------------------------
# Matrices needed later by many calculations:
# ------------------------------------------------------------------------
# X transpose e and Z transpose e
Xte = XtY - (XtX @ beta)
# ------------------------------------------------------------------------
# Derivatives
# ------------------------------------------------------------------------
# Derivative wrt beta
dldB = get_dldB2D(sigma2, Xte, XtZ, DinvIplusZtZD, Zte)
# Derivative wrt sigma^2
dldsigma2 = get_dldsigma22D(n, ete, Zte, sigma2, DinvIplusZtZD, reml, p)
# For each factor, factor k, work out dl/dD_k
dldDdict = dict()
for k in np.arange(len(nraneffs)):
# Store it in the dictionary
if ZtZmatdict[k] is None:
dldDdict[k],ZtZmatdict[k] = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD,None, reml, ZtX, XtX)
else:
dldDdict[k],_ = get_dldDk2D(k, nlevels, nraneffs, ZtZ, Zte, sigma2, DinvIplusZtZD, ZtZmatdict[k], reml, ZtX, XtX)
# ------------------------------------------------------------------------
# Covariance of dl/dsigma2
# ------------------------------------------------------------------------
covdldsigma2 = n/(2*(sigma2**2))
# ------------------------------------------------------------------------
# Construct the Fisher Information matrix
# ------------------------------------------------------------------------
FisherInfoMat = np.zeros((tnp,tnp))
# Add dl/dbeta covariance
FisherInfoMat[np.ix_(np.arange(p),np.arange(p))] = get_covdldbeta2D(XtZ, XtX, ZtZ, DinvIplusZtZD, sigma2)
# Add dl/dsigma2 covariance
FisherInfoMat[p,p] = covdldsigma2
# Add dl/dsigma2 dl/dD covariance
for k in np.arange(len(nraneffs)):
            # Compute covariance between dl/dsigma2 and dl/dD_k
if ZtZmatdict[k] is None:
covdldDksigma2,ZtZmatdict[k] = get_covdldDkdsigma22D(k, sigma2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, vec=True, ZtZmat=None)
else:
covdldDksigma2,_ = get_covdldDkdsigma22D(k, sigma2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict, vec=True, ZtZmat=ZtZmatdict[k])
# Assign to the relevant block
FisherInfoMat[p, FishIndsDk[k]:FishIndsDk[k+1]] = covdldDksigma2.reshape(FishIndsDk[k+1]-FishIndsDk[k])
FisherInfoMat[FishIndsDk[k]:FishIndsDk[k+1],p] = FisherInfoMat[p, FishIndsDk[k]:FishIndsDk[k+1]].transpose()
# Add dl/dD covariance for each pair (k1,k2) of random factors
for k1 in np.arange(len(nraneffs)):
for k2 in np.arange(k1+1):
# Work out the indices of D_k1 and D_k2
IndsDk1 = np.arange(FishIndsDk[k1],FishIndsDk[k1+1])
IndsDk2 = np.arange(FishIndsDk[k2],FishIndsDk[k2+1])
# Get covariance between D_k1 and D_k2
if permdict[str(k1)+str(k2)] is None:
FisherInfoMat[np.ix_(IndsDk1, IndsDk2)],permdict[str(k1)+str(k2)] = get_covdldDk1Dk22D(k1, k2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict,vec=True,perm=None)
else:
FisherInfoMat[np.ix_(IndsDk1, IndsDk2)],_ = get_covdldDk1Dk22D(k1, k2, nlevels, nraneffs, ZtZ, DinvIplusZtZD, dupMatTdict,vec=True,perm=permdict[str(k1)+str(k2)])
FisherInfoMat[np.ix_(IndsDk2, IndsDk1)] = FisherInfoMat[np.ix_(IndsDk1, IndsDk2)].transpose()
# Check Fisher Information matrix is symmetric
FisherInfoMat = forceSym2D(FisherInfoMat)
# --------------------------------------------------------------------------
        # Concatenate parameters and derivatives together
# --------------------------------------------------------------------------
        paramVector = np.concatenate((beta, np.array([[sigma2]])))
        derivVector = np.concatenate((dldB, dldsigma2))
        for k in np.arange(len(nraneffs)):
            paramVector = np.concatenate((paramVector, mat2vec2D(Ddict[k])))
            derivVector = np.concatenate((derivVector, mat2vec2D(dldDdict[k])))
import numpy as np
import sys
#read the data from the file
data = open(sys.argv[1],'r').read()
characters = list(set(data))
data_size, vocab_size = len(data),len(characters)
print("Data has %d characters, %d unique characters."%(data_size,vocab_size))
#char to idx mapping
char_to_idx = {ch:i for i,ch in enumerate(characters)}
idx_to_char = {i:ch for i,ch in enumerate(characters)}
#define some hyperparameters
hidden_size = 100
seq_length = 25
learning_rate = 1e-2
#model parameters
Wxi = np.random.randn(hidden_size,vocab_size)*0.01
Whi = np.random.randn(hidden_size,hidden_size)*0.01
bi = np.zeros((hidden_size,1))
Wxr = np.random.randn(hidden_size,vocab_size)*0.01
Whr = np.random.randn(hidden_size,hidden_size)*0.01
br = np.zeros((hidden_size,1))
Wxh = np.random.randn(hidden_size,vocab_size)*0.01
Whh = np.random.randn(hidden_size,hidden_size)*0.01
bh = np.zeros((hidden_size,1))
Why = np.random.randn(vocab_size,hidden_size)*0.01
by = np.zeros((vocab_size,1))
def sigmoid(x):
return 1/(1+np.exp(-x))
def softmax(input):
# Subtraction of max value improves numerical stability.
e_input = np.exp(input - np.max(input))
return e_input / e_input.sum()
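# Illustrative check (assumed values): shifting by the max leaves the result
# unchanged but avoids overflow, e.g. softmax(np.array([[1000.], [1001.]]))
# returns finite probabilities summing to 1, whereas exponentiating the raw
# inputs would overflow to inf.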
def gru(inputs,targets,hprev):
"""
inputs and targets are both lists of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model params and last hidden state
"""
x,r,i,h,h_hat,y,p = {},{},{},{},{},{},{}
#copy the hprev to last element of hs dict
h[-1] = np.copy(hprev)
loss = 0
#forward pass
for t in range(len(inputs)):
x[t] = np.zeros((vocab_size,1)) #encode in 1-of-k representation
x[t][inputs[t]]=1
r[t] = sigmoid(np.dot(Whr,h[t-1]) + np.dot(Wxr,x[t]) + br)
i[t] = sigmoid(np.dot(Whi,h[t-1]) + np.dot(Wxi,x[t]) + bi)
h_hat[t] = np.tanh(np.dot(Whh,np.multiply(r[t],h[t-1])) + np.dot(Wxh,x[t]) + bh)
h[t] = np.multiply(i[t],h[t-1]) + np.multiply((1-i[t]), h_hat[t])
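        # h_t interpolates between the previous hidden state and the
        # candidate state, gated elementwise by i_t (the update gate in
        # this GRU variant)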
y[t] = np.dot(Why,h[t]) + by
p[t] = softmax(y[t])
loss += -np.log(p[t][targets[t],0])
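        # cross-entropy: negative log-probability assigned to the true
        # next character at step t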
#backward pass
dWhy,dWhi,dWhr,dWhh,dWxi,dWxr,dWxh = np.zeros_like(Why),np.zeros_like(Whi),np.zeros_like(Whr),np.zeros_like(Whh),np.zeros_like(Wxi),np.zeros_like(Wxr),np.zeros_like(Wxh)
    dby,dbi,dbr,dbh = np.zeros_like(by),np.zeros_like(bi),np.zeros_like(br),np.zeros_like(bh)
import numpy as np
import unittest
from pytest import importorskip
cl = importorskip('pyopencl')
import pysph.base.particle_array
from pysph.base.device_helper import DeviceHelper # noqa: E402
from pysph.base.utils import get_particle_array # noqa: E402
from pysph.base.tree.point_tree import PointTree # noqa: E402
def _gen_uniform_dataset_2d(n, h, seed=None):
if seed is not None:
np.random.seed(seed)
u = np.random.uniform
pa = get_particle_array(x=u(size=n), y=u(size=n), h=h)
h = DeviceHelper(pa, backend='opencl')
pa.set_device_helper(h)
return pa
def _gen_uniform_dataset(n, h, seed=None):
if seed is not None:
np.random.seed(seed)
u = np.random.uniform
pa = get_particle_array(x=u(size=n), y=u(size=n), z=u(size=n), h=h)
h = DeviceHelper(pa, backend='opencl')
pa.set_device_helper(h)
return pa
def _dfs_find_leaf(tree):
leaf_id_count = tree.allocate_leaf_prop(np.int32)
dfs_find_leaf = tree.leaf_tree_traverse(
"int *leaf_id_count",
setup="leaf_id_count[i] = 0;",
node_operation="if (cid_dst == cid_src) leaf_id_count[i]++",
leaf_operation="if (cid_dst == cid_src) leaf_id_count[i]++",
output_expr=""
)
dfs_find_leaf(tree, tree, leaf_id_count.dev)
return leaf_id_count.dev.get()
def _check_children_overlap_2d(node_xmin, node_xmax, child_offset):
for j in range(4):
nxmin1 = node_xmin[child_offset + j]
nxmax1 = node_xmax[child_offset + j]
for k in range(4):
nxmin2 = node_xmin[child_offset + k]
nxmax2 = node_xmax[child_offset + k]
if j != k:
assert (nxmax1[0] <= nxmin2[0] or nxmax2[0] <= nxmin1[0] or
nxmax1[1] <= nxmin2[1] or nxmax2[1] <= nxmin1[1])
def _check_children_overlap(node_xmin, node_xmax, child_offset):
for j in range(8):
nxmin1 = node_xmin[child_offset + j]
nxmax1 = node_xmax[child_offset + j]
for k in range(8):
nxmin2 = node_xmin[child_offset + k]
nxmax2 = node_xmax[child_offset + k]
if j != k:
assert (nxmax1[0] <= nxmin2[0] or nxmax2[0] <= nxmin1[0] or
nxmax1[1] <= nxmin2[1] or nxmax2[1] <= nxmin1[1] or
nxmax1[2] <= nxmin2[2] or nxmax2[2] <= nxmin1[2])
def _test_tree_structure(tree, k):
# Traverse tree and check if max depth is correct
    # Additionally check that particle sets of siblings are disjoint
    # and that the union of a node's children's particle sets equals the
    # node's own particle set
#
# This effectively also checks that no particle is present in two nodes of
# the same level
s = [0, ]
d = [0, ]
offsets = tree.offsets.dev.get()
pbounds = tree.pbounds.dev.get()
max_depth = tree.depth
max_depth_here = 0
pids = set()
while len(s) != 0:
n = s[0]
depth = d[0]
max_depth_here = max(max_depth_here, depth)
pbound = pbounds[n]
assert (depth <= max_depth)
del s[0]
del d[0]
if offsets[n] == -1:
for i in range(pbound[0], pbound[1]):
pids.add(i)
continue
# Particle ranges of children are contiguous
# and are contained within parent's particle range
start = pbound[0]
for i in range(k):
child_idx = offsets[n] + i
assert (pbounds[child_idx][0] == start)
assert (pbounds[child_idx][0] <= pbounds[child_idx][1])
start = pbounds[child_idx][1]
assert (child_idx < len(offsets))
s.append(child_idx)
d.append(depth + 1)
assert (start == pbound[1])
class QuadtreeTestCase(unittest.TestCase):
def setUp(self):
use_double = False
self.N = 3000
pa = _gen_uniform_dataset_2d(self.N, 0.2, seed=0)
self.quadtree = PointTree(pa, radius_scale=1., use_double=use_double,
leaf_size=32, dim=2)
self.leaf_size = 32
self.quadtree.refresh(np.array([0., 0.]), np.array([1., 1.]),
np.min(pa.h))
self.pa = pa
def test_pids(self):
pids = self.quadtree.pids.dev.get()
s = set()
for i in range(len(pids)):
if 0 <= pids[i] < self.N:
s.add(pids[i])
assert (len(s) == self.N)
def test_depth_and_inclusiveness(self):
_test_tree_structure(self.quadtree, 4)
def test_node_bounds(self):
self.quadtree.set_node_bounds()
pids = self.quadtree.pids.dev.get()
offsets = self.quadtree.offsets.dev.get()
pbounds = self.quadtree.pbounds.dev.get()
node_xmin = self.quadtree.node_xmin.dev.get()
node_xmax = self.quadtree.node_xmax.dev.get()
node_hmax = self.quadtree.node_hmax.dev.get()
x = self.pa.x[pids]
y = self.pa.y[pids]
h = self.pa.h[pids]
for i in range(len(offsets)):
nxmin = node_xmin[i]
nxmax = node_xmax[i]
nhmax = node_hmax[i]
for j in range(pbounds[i][0], pbounds[i][1]):
assert (nxmin[0] <= np.float32(x[j]) <= nxmax[0])
assert (nxmin[1] <= np.float32(y[j]) <= nxmax[1])
assert (np.float32(h[j]) <= nhmax)
# Check that children nodes don't overlap
if offsets[i] != -1:
_check_children_overlap_2d(node_xmin, node_xmax, offsets[i])
def test_dfs_traversal(self):
leaf_id_count = _dfs_find_leaf(self.quadtree)
np.testing.assert_array_equal(
            np.ones(self.quadtree.unique_cid_count, dtype=np.int32),
            leaf_id_count
        )
import numpy as np
import logging
from scipy.stats import truncnorm
def selection(Z):
"""
    Characterise selecting the best (smallest-valued) model from vector Z
    as a set of linear constraints.
    input
    Z : "Feature vector" with a normal distribution.
    return
    ind_sel: Selected index.
    A,b : The linear constraints describing the selection event Az <= b.
"""
N = np.shape(Z)[0]
## Sorted list of Z
ind_sorted = np.argsort(Z)
    ## Pick the best (smallest) entry
ind_sel = ind_sorted[0]
A = np.zeros((N-1,N))
for i in range(N-1):
A[i, ind_sorted[0]] = 1
A[i, ind_sorted[i+1]] = -1
b = np.zeros((N-1))
assert np.sum(np.matmul(A,Z) > 0) ==0, "Assumption error"
return ind_sel, A, b
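# Illustrative example (assumed values): for Z = [0.3, 0.1, 0.5], argsort
# gives [1, 0, 2], so ind_sel = 1 and the rows of A encode Z[1] - Z[0] <= 0
# and Z[1] - Z[2] <= 0, i.e. the event "index 1 attains the minimum".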
def psi_inf(A,b,eta, mu, cov, z):
"""
Returns the p-value of the truncated normal. The mean,
variance, and truncated points [a,b] is determined by Lee et al 2016.
"""
l_thres, u_thres= calculate_threshold(z, A, b, eta, cov)
sigma2 = np.matmul(eta,np.matmul(cov,eta))
scale = np.sqrt(sigma2)
params = {"u_thres":u_thres,
"l_thres":l_thres,
"mean": np.matmul(eta,mu),
"scale":scale,
}
ppf = lambda x: truncnorm_ppf(x,
l_thres,
u_thres,
loc=np.matmul(eta,mu),
scale=scale)
    mean = np.matmul(eta, mu)
    sf = lambda x: truncnorm.sf(x, (l_thres-mean)/scale, (u_thres-mean)/scale,
                                loc=mean, scale=scale)
return ppf, sf
def calculate_threshold(z, A, b, eta, cov):
"""
Calculates the respective threshold for the method PSI_Inf.
"""
etaz = eta.dot(z)
Az = A.dot(z)
Sigma_eta = cov.dot(eta)
deno = Sigma_eta.dot(eta)
alpha = A.dot(Sigma_eta)/deno
assert(np.shape(A)[0] == np.shape(alpha)[0])
pos_alpha_ind = np.argwhere(alpha>0).flatten()
neg_alpha_ind = np.argwhere(alpha<0).flatten()
acc = (b - np.matmul(A,z))/alpha+np.matmul(eta,z)
if (np.shape(neg_alpha_ind)[0] > 0):
l_thres = np.max(acc[neg_alpha_ind])
else:
l_thres = -10.0**10
if (np.shape(pos_alpha_ind)[0] > 0):
u_thres = np.min(acc[pos_alpha_ind])
else:
u_thres= 10**10
return l_thres, u_thres
def test_significance(A, b, eta, mu, cov, z, alpha):
"""
    Compute a p-value with a one-tailed test (right tail).
    Returns (reject_H0, p-value).
"""
ppf, sf = psi_inf(A, b, eta, mu, cov, z)
stat = np.matmul(eta,z) ## Test statistic
sigma = np.sqrt(np.matmul(eta,np.matmul(cov,eta)))
## If the std dev is < 0 or undefined, do not reject the hypothesis.
if np.isnan(sigma) or not np.isreal(sigma):
logging.warning("Scale is not real or negative, test reject")
return False, 1.
threshold = ppf(1.-alpha)
pval = sf(stat)
return stat > threshold, pval
def generateEta(ind_sel, n_models):
"""
Generate multiple etas corresponding to testing
within the selected indices.
"""
etas = np.zeros((n_models-1, n_models))
for i in range(n_models-1):
index = i if i < ind_sel else i +1
etas[i,ind_sel] = -1
etas[i,index]=1
return etas
def truncnorm_ppf(x, a, b,loc=0., scale=1.):
"""
Approximate Percentile function of the truncated normal. Particularly in
    the tail regions (where the standard SciPy function may be undefined).
"""
thres = truncnorm.ppf(x,(a-loc)/scale,(b-loc)/scale,loc=loc, scale=scale)
    if np.any(np.isnan(thres)):
        # The source dump is truncated here. A hedged fallback: map NaNs
        # (which arise deep in the tails) to the nearer truncation bound.
        fallback = np.where(np.asarray(x) >= 0.5, b, a)
        thres = np.where(np.isnan(thres), fallback, thres)
    return thres
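# --- Hypothetical end-to-end demo (not in the original source) ---
# Minimal sketch of the selective-inference pipeline above under an assumed
# i.i.d. standard-normal model; all values are illustrative.
if __name__ == '__main__':
    n_models = 5
    mu = np.zeros(n_models)
    cov = np.eye(n_models)
    z = np.random.multivariate_normal(mu, cov)
    ind_sel, A, b = selection(z)
    for eta in generateEta(ind_sel, n_models):
        reject, pval = test_significance(A, b, eta, mu, cov, z, alpha=0.05)
        print(ind_sel, reject, pval)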
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerBoxCloseEnvV2(SawyerXYZEnv):
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.5, 0.02)
obj_high = (0.05, 0.55, 0.02)
goal_low = (-0.1, 0.7, 0.133)
goal_high = (0.1, 0.8, 0.133)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.55, 0.02], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0.0, 0.75, 0.133])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self._target_to_obj_init = None
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
@property
def model_name(self):
if self.use_franka: # franka
return full_v2_path_for('franka_xyz/franka_box.xml')
else:
return full_v2_path_for('sawyer_xyz/sawyer_box.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(
reward,
reward_grab,
reward_ready,
reward_success,
success
) = self.compute_reward(action, obs)
info = {
'success': float(success),
'near_object': reward_ready,
'grasp_success': reward_grab >= 0.5,
'grasp_reward': reward_grab,
'in_place_reward': reward_success,
'obj_to_target': 0,
'unscaled_reward': reward,
}
return reward, info
@property
def _target_site_config(self):
return []
def _get_id_main_object(self):
return self.unwrapped.model.geom_name2id('BoxHandleGeom')
def _get_pos_objects(self):
return self.get_body_com('top_link')
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('top_link')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
box_height = self.get_body_com('boxbody')[2]
if self.random_init:
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.25:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
self._target_pos = goal_pos[-3:]
self.sim.model.body_pos[self.model.body_name2id('boxbody')] = np.concatenate((self._target_pos[:2], [box_height]))
self._set_obj_xyz(self.obj_init_pos)
return self._get_obs()
@staticmethod
def _reward_grab_effort(actions):
return (np.clip(actions[3], -1, 1) + 1.0) / 2.0
@staticmethod
def _reward_quat(obs):
# Ideal upright lid has quat [.707, 0, 0, .707]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([0.707, 0, 0, 0.707])
error = np.linalg.norm(obs[7:11] - ideal)
return max(1.0 - error/0.2, 0.0)
@staticmethod
def _reward_pos(obs, target_pos):
hand = obs[:3]
lid = obs[4:7] + np.array([.0, .0, .02])
threshold = 0.02
# floor is a 3D funnel centered on the lid's handle
radius = np.linalg.norm(hand[:2] - lid[:2])
if radius <= threshold:
floor = 0.0
else:
floor = 0.04 * np.log(radius - threshold) + 0.4
# prevent the hand from running into the handle prematurely by keeping
# it above the "floor"
above_floor = 1.0 if hand[2] >= floor else reward_utils.tolerance(
floor - hand[2],
bounds=(0.0, 0.01),
margin=floor / 2.0,
sigmoid='long_tail',
)
# grab the lid's handle
in_place = reward_utils.tolerance(
np.linalg.norm(hand - lid),
bounds=(0, 0.02),
margin=0.5,
sigmoid='long_tail',
)
ready_to_lift = reward_utils.hamacher_product(above_floor, in_place)
# now actually put the lid on the box
pos_error = target_pos - lid
error_scale = np.array([1., 1., 3.]) # Emphasize Z error
a = 0.2 # Relative importance of just *trying* to lift the lid at all
b = 0.8 # Relative importance of placing the lid on the box
lifted = a * float(lid[2] > 0.04) + b * reward_utils.tolerance(
np.linalg.norm(pos_error * error_scale),
bounds=(0, 0.05),
margin=0.25,
sigmoid='long_tail',
)
return ready_to_lift, lifted
def compute_reward(self, actions, obs):
reward_grab = SawyerBoxCloseEnvV2._reward_grab_effort(actions)
reward_quat = SawyerBoxCloseEnvV2._reward_quat(obs)
reward_steps = SawyerBoxCloseEnvV2._reward_pos(obs, self._target_pos)
reward = sum((
2.0 * reward_utils.hamacher_product(reward_grab, reward_steps[0]),
8.0 * reward_steps[1],
))
# Override reward on success
        success = np.linalg.norm(obs[4:7] - self._target_pos) < 0.08
        # The source dump is truncated here; the threshold above and the
        # constant success reward below follow Metaworld's usual pattern
        # but are assumptions, not the verified original values.
        if success:
            reward = 10.0
        return (
            reward,
            reward_grab,
            reward_steps[0],
            reward_steps[1],
            success,
        )
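# --- Hypothetical usage sketch (not in the original source) ---
# Requires MuJoCo and the Metaworld assets; depending on the Metaworld
# version, a task may need to be set before reset() is allowed.
if __name__ == '__main__':
    env = SawyerBoxCloseEnvV2()
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        print(reward, info['success'])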
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Range
from geometry_msgs.msg import Pose
from rfid_node.msg import TagData
import math
import tf
import numpy as np
import sklearn
from sklearn import linear_model
from threading import Lock
from distutils.version import StrictVersion
from datetime import datetime
import sys
def distance(poseA,poseB):
'''
:param poseA:
:param poseB:
:return: euclidean distance between poses
'''
dist = math.sqrt(pow(poseA.position.x -
poseB.position.x, 2) +
pow(poseA.position.y -
poseB.position.y, 2) +
pow(poseA.position.z -
poseB.position.z, 2))
return dist
def getYawFromPose(aPose):
'''
:param aPose:
:return: yaw angle of pose's orientation
'''
quatA = (
aPose.orientation.x,
aPose.orientation.y,
aPose.orientation.z,
aPose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quatA)
# roll = euler[0] pitch = euler[1]
yawA = euler[2]
return yawA
def yawBetweenPoses(poseA, poseB):
'''
:param poseA:
:param poseB:
:return: yaw angle difference yawB-yawA
'''
yawA=getYawFromPose(poseA)
yawB = getYawFromPose(poseB)
return yawB-yawA
def buildDiffVector(vector,diffSize):
'''
Calculates up to diffSize possible differences between elements of a vector
:param vector: vector to be opperated
:param diffSize: maximum size of differences vector (may be up to ( (vector.size-1) /2.0)*vector.size )
:return:
'''
diffSize=min(diffSize, ( (vector.size-1) /2.0)*vector.size )
diffV = []
for i in range(0, vector.size - 1):
        # 1-D arrays: concatenate along the only axis
        diffV = np.concatenate((diffV, vector[i + 1:] - vector[i]))
if diffV.size>diffSize:
break
    return diffV[:int(diffSize)]
def solvePhaseAmbiguity(X0,y0):
X = X0.reshape(-1, 1)
y = y0.reshape(-1, 1)
# 1.- calculate average y at each X value
xUnique = np.unique(X)
yUnique = np.zeros(xUnique.size)
for i in range(0, xUnique.size):
yUnique[i] = np.mean(y[X == xUnique[i]])
# 2.-roughly filter before looking for sign changes
yUnique2 = np.zeros(xUnique.size)
yUnique2[0] = yUnique[0]
for i in range(1, yUnique.size):
yUnique2[i] = 0.5 * yUnique[i - 1] + 0.5 * yUnique[i]
yUnique = yUnique2
    #3.-find first derivative: negative values indicate a descending trend
yd = np.array([1])
yd = np.append(yd, np.sign(yUnique[1:] - yUnique[:-1]))
indexes = (yd == -1)
#4.-select relevant points to infer new phase addition
xSel = xUnique[indexes]
ySel = yUnique[indexes]
    #this means I couldn't find any descending trend
if ySel.size==0:
return y0
    #5.-using these points, calculate the extra phase vector
prevY = ySel[0]
extraPhase = 0
phaseVector = np.zeros(ySel.size)
for i in range(0, ySel.size):
if ySel[i] > prevY:
extraPhase -= math.pi
prevY = ySel[i]
phaseVector[i] = extraPhase
    #6.-apply these changes to the whole vector
found = False
finalY = np.zeros(y.size)
for i in range(0, X.size):
for j in range(0, xSel.size):
if X[i] == xSel[j]:
finalY[i] = y[i] + phaseVector[j]
found = True
break
if not found:
finalY[i] = y[i]
found = False
return finalY
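# --- Hypothetical demo (not in the original source) ---
# Minimal sketch of the phase-unwrapping helpers on synthetic data: a
# linear phase/frequency relation wrapped into [0, pi); all values are
# illustrative only.
if __name__ == '__main__':
    freqs = np.linspace(865e6, 868e6, 50)
    true_slope = -4.0 * math.pi * 2.5 / 3e8   # tag at an assumed ~2.5 m
    phases = np.mod(true_slope * freqs, math.pi)
    df = buildDiffVector(freqs, 200)
    dp = buildDiffVector(phases, 200)
    print(solvePhaseAmbiguity(df, dp)[:5])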
# Node class.
class TagLocatorNode():
def ransacEstimation(self):
'''
Using tag stored readings, calls ransac to estimate radius
:return: estimated radius
'''
R=-1
numHip = min(self.numHipot, self.rssiVector.size)
#sort both of them usind df as reference
orderIndexs=np.argsort(self.freqVector)
self.freqVector = self.freqVector[orderIndexs]
self.phaseVector = self.phaseVector[orderIndexs]
# create a set of frequency and phase differences
df = buildDiffVector(self.freqVector, numHip)
dp = buildDiffVector(self.phaseVector, numHip)
dpUnwrapped=solvePhaseAmbiguity(df,dp)
df = df.reshape(-1, 1)
dpUnwrapped = dpUnwrapped.reshape(-1, 1)
# feed into RANSAC Regressor...
mr = sklearn.linear_model.RANSACRegressor(sklearn.linear_model.LinearRegression())
mr.fit(df, dpUnwrapped)
R = -mr.estimator_.coef_ * self.C / (4.0 * math.pi)
if 0:
rospy.logerr("Using %d points",self.rssiVector.size)
rospy.logerr("self.freqVector: %s",np.array_str(self.freqVector))
rospy.logerr("self.phaseVector: %s",np.array_str(self.phaseVector))
rospy.logerr("df: %s",np.array_str(df))
rospy.logerr("dpUnwrapped: %s",np.array_str(dpUnwrapped))
rospy.logerr("mr.estimator_.coef_ : %s",mr.estimator_.coef_ )
rospy.logerr("R: %s",R )
rospy.logerr("Deleting after ransac")
return R
def odomCallback(self,data):
'''
Each time robot moves, we publish to clean tag data.
Raises internal flag to request it
:param data:
:return:
'''
newPose=data.pose.pose
self.distInc=distance(newPose,self.prevPose)
if (self.distInc>self.distThresh) or (yawBetweenPoses(newPose,self.prevPose)>self.angThresh) :
self.singlePub()
self.dataLock.acquire()
try:
#delete values, either we have used them or are too few
self.rssiVector = np.array([])
self.phaseVector = np.array([])
self.freqVector = np.array([])
finally:
self.dataLock.release()
self.prevPose=newPose
def tagCallback(self,data):
'''
Stores received tag data into buffer.
Handles new position flag
:param data:
:return:
'''
#print("...newTag:", data.ID, data.stats[0].rssi, data.stats[0].phase, data.stats[0].frequency)
if (data.ID.upper() == self.tagNAME.upper()):
# some versions of RFID library return values in degs and KHz
if 0:
if data.stats[0].frequency<10e8:
print("...newTag:", data.ID, data.stats[0].rssi, data.stats[0].phase*math.pi/180.0, data.stats[0].frequency*1000.0)
else:
print("...newTag:", data.ID, data.stats[0].rssi, data.stats[0].phase, data.stats[0].frequency)
id = data.ID
rssi = float(data.stats[0].rssi)
# RFID library return values in degs and KHz
phase = float(data.stats[0].phase)*math.pi/180.0
freq = float(data.stats[0].frequency)*1000.0
#add new entry to vectors
self.dataLock.acquire()
try:
self.rssiVector = np.append(self.rssiVector, rssi)
                self.phaseVector = np.append(self.phaseVector, phase)
                # Source dump truncated here; completing the buffer update
                # symmetrically with the other vectors (assumed intent).
                self.freqVector = np.append(self.freqVector, freq)
            finally:
                self.dataLock.release()
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
import random
import math
import os
import time
from utils.VLSW import pad_all_cases
# from VLSW import pad_all_cases
# set the random seeds for reproducability
SEED = 1234
random.seed(SEED)
def preprocess_df(df):
""" The training and testing data are manually selected.
:param df: dataframe with raw data
:return:
"""
df.set_index('date', inplace=True)
# Imputation Target
N0 = df['N0'].values.copy().reshape(-1, 1)
    # Standardization, use MinMaxScaler
    # Sensor columns follow the repeating pattern N0,E0,S0,W0, ..., N12,E12,S12,W12
    sensor_cols = ['{}{}'.format(d, i) for i in range(13)
                   for d in ('N', 'E', 'S', 'W')]
    scaler_x = MinMaxScaler()
    scaler_x.fit(df[sensor_cols])
    df[sensor_cols] = scaler_x.transform(df[sensor_cols])
scaler_y = MinMaxScaler()
scaler_y.fit(N0)
y_all = scaler_y.transform(N0)
df_train = df.loc['2008/1/1 7:00':'2008/1/10 23:45'].copy()
df_test = df.loc['2008/1/11 0:00':'2008/1/13 6:45'].copy()
return df_train, df_test, scaler_x, scaler_y
def train_val_test_generate(dataframe, model_params):
'''
:param dataframe: processed dataframe
:param model_params: for input dim
:return: train_x, train_y, test_x, test_y with the same length (by padding zero)
'''
train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples = pad_all_cases(
dataframe, dataframe['N0'].values, model_params,
model_params['min_before'], model_params['max_before'],
model_params['min_after'], model_params['max_after'],
model_params['output_length'])
train_val_test_y = np.expand_dims(train_val_test_y, axis=2)
return train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples
def train_test_split_SSIM(x, y, x_len, x_before_len, model_params, SEED):
'''
:param x: all x samples
:param y: all y samples
:param model_params: parameters
:param SEED: random SEED
:return: train set, test set
'''
# check and remove samples with NaN (just incase)
index_list = []
for index, (x_s, y_s, len_s,
len_before_s) in enumerate(zip(x, y, x_len, x_before_len)):
        if (np.isnan(x_s).any()) or (np.isnan(y_s).any()):
            # Source dump truncated here; collecting the offending sample's
            # index for later removal is the assumed intent.
            index_list.append(index)
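# --- Hypothetical illustration (not in the original source) ---
# The NaN screen above, shown standalone on toy arrays.
if __name__ == '__main__':
    xs = [np.array([1.0, 2.0]), np.array([np.nan, 2.0])]
    ys = [np.array([0.5]), np.array([0.7])]
    bad = [i for i, (a, b) in enumerate(zip(xs, ys))
           if np.isnan(a).any() or np.isnan(b).any()]
    print(bad)  # -> [1]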
import numpy as np
import cv2
import time
import scipy
Tr_velo_to_cam = np.array(
[[7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04, -4.069766000000e-03],
[1.480249000000e-02, 7.280733000000e-04, -9.998902000000e-01, -7.631618000000e-02],
[9.998621000000e-01, 7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01],
[0, 0, 0, 1]])
Tr_cam_to_velo = np.linalg.inv(Tr_velo_to_cam)
caliv = np.array(
[[9.786977e+02, 0.000000e+00, 6.900000e+02, 0],
[0.000000e+00, 9.717435e+02, 2.497222e+02, 0],
[0.000000e+00, 0.000000e+00, 1.000000e+00, 0],
[0, 0, 0, 0]])
trans = np.array(
[[0, 0, 0, 0],
[0, 0, 0, 0.0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
)
caliv_trans = np.matmul(caliv, trans)
P0 = np.array(
[[7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 0.000000000000e+00],
[0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 0.000000000000e+00],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 0.000000000000e+00],
[0, 0, 0, 1]])
P1 = np.array(
[[7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, -3.875744000000e+02],
[0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 0.000000000000e+00],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 0.000000000000e+00],
[0, 0, 0, 1]])
P2 = np.array(
[[7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 4.485728000000e+01],
[0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.163791000000e-01],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.745884000000e-03],
[0, 0, 0, 1]])
P3 = np.array(
[[7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, -3.395242000000e+02],
[0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.199936000000e+00],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.729905000000e-03],
[0, 0, 0, 1]])
kitti_proj_mat = P0 + caliv_trans
kitti_proj_mat_inv = np.linalg.inv(kitti_proj_mat)
offset = np.zeros((64,64,64,3))
for i in range(64):
for j in range(64):
for k in range(64):
offset[i,j,k,:] = i,j,k
def matmul3x3(a, b):
c00 = a[0, 0] * b[0, 0] + a[0, 1] * b[1, 0] + a[0, 2] * b[2, 0]
c01 = a[0, 0] * b[0, 1] + a[0, 1] * b[1, 1] + a[0, 2] * b[2, 1]
c02 = a[0, 0] * b[0, 2] + a[0, 1] * b[1, 2] + a[0, 2] * b[2, 2]
c10 = a[1, 0] * b[0, 0] + a[1, 1] * b[1, 0] + a[1, 2] * b[2, 0]
c11 = a[1, 0] * b[0, 1] + a[1, 1] * b[1, 1] + a[1, 2] * b[2, 1]
c12 = a[1, 0] * b[0, 2] + a[1, 1] * b[1, 2] + a[1, 2] * b[2, 2]
c20 = a[2, 0] * b[0, 0] + a[2, 1] * b[1, 0] + a[2, 2] * b[2, 0]
c21 = a[2, 0] * b[0, 1] + a[2, 1] * b[1, 1] + a[2, 2] * b[2, 1]
c22 = a[2, 0] * b[0, 2] + a[2, 1] * b[1, 2] + a[2, 2] * b[2, 2]
return np.array([[c00, c01, c02],
[c10, c11, c12],
[c20, c21, c22]])
def matmul4x4(a, b):
c00 = a[0, 0] * b[0, 0] + a[0, 1] * b[1, 0] + a[0, 2] * b[2, 0] + a[0, 3] * b[3, 0]
c01 = a[0, 0] * b[0, 1] + a[0, 1] * b[1, 1] + a[0, 2] * b[2, 1] + a[0, 3] * b[3, 1]
c02 = a[0, 0] * b[0, 2] + a[0, 1] * b[1, 2] + a[0, 2] * b[2, 2] + a[0, 3] * b[3, 2]
c03 = a[0, 0] * b[0, 3] + a[0, 1] * b[1, 3] + a[0, 2] * b[2, 3] + a[0, 3] * b[3, 3]
c10 = a[1, 0] * b[0, 0] + a[1, 1] * b[1, 0] + a[1, 2] * b[2, 0] + a[1, 3] * b[3, 0]
c11 = a[1, 0] * b[0, 1] + a[1, 1] * b[1, 1] + a[1, 2] * b[2, 1] + a[1, 3] * b[3, 1]
c12 = a[1, 0] * b[0, 2] + a[1, 1] * b[1, 2] + a[1, 2] * b[2, 2] + a[1, 3] * b[3, 2]
c13 = a[1, 0] * b[0, 3] + a[1, 1] * b[1, 3] + a[1, 2] * b[2, 3] + a[1, 3] * b[3, 3]
c20 = a[2, 0] * b[0, 0] + a[2, 1] * b[1, 0] + a[2, 2] * b[2, 0] + a[2, 3] * b[3, 0]
c21 = a[2, 0] * b[0, 1] + a[2, 1] * b[1, 1] + a[2, 2] * b[2, 1] + a[2, 3] * b[3, 1]
c22 = a[2, 0] * b[0, 2] + a[2, 1] * b[1, 2] + a[2, 2] * b[2, 2] + a[2, 3] * b[3, 2]
c23 = a[2, 0] * b[0, 3] + a[2, 1] * b[1, 3] + a[2, 2] * b[2, 3] + a[2, 3] * b[3, 3]
c30 = a[3, 0] * b[0, 0] + a[3, 1] * b[1, 0] + a[3, 2] * b[2, 0] + a[3, 3] * b[3, 0]
c31 = a[3, 0] * b[0, 1] + a[3, 1] * b[1, 1] + a[3, 2] * b[2, 1] + a[3, 3] * b[3, 1]
c32 = a[3, 0] * b[0, 2] + a[3, 1] * b[1, 2] + a[3, 2] * b[2, 2] + a[3, 3] * b[3, 2]
c33 = a[3, 0] * b[0, 3] + a[3, 1] * b[1, 3] + a[3, 2] * b[2, 3] + a[3, 3] * b[3, 3]
return np.array([[c00, c01, c02, c03],
[c10, c11, c12, c13],
[c20, c21, c22, c23],
[c30, c31, c32, c33]])
def matmul3x1(a,b):
c0 = a[0,0] * b[0] + a[0,1] * b[1] + a[0,2] * b[2]
c1 = a[1,0] * b[0] + a[1,1] * b[1] + a[1,2] * b[2]
c2 = a[2,0] * b[0] + a[2,1] * b[1] + a[2,2] * b[2]
return np.array([c0,c1,c2])
def matmul4x1(a, b):
c0 = a[0,0] * b[0] + a[0,1] * b[1] + a[0,2] * b[2] + a[0,3] * b[3]
c1 = a[1,0] * b[0] + a[1,1] * b[1] + a[1,2] * b[2] + a[1,3] * b[3]
c2 = a[2,0] * b[0] + a[2,1] * b[1] + a[2,2] * b[2] + a[2,3] * b[3]
c3 = a[3,0] * b[0] + a[3,1] * b[1] + a[3,2] * b[2] + a[3,3] * b[3]
return np.array([c0,c1,c2,c3])
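# --- Hypothetical sanity check (not in the original source) ---
# The hand-unrolled helpers above should agree with NumPy's matmul:
if __name__ == '__main__':
    a, b = np.random.rand(4, 4), np.random.rand(4, 4)
    assert np.allclose(matmul4x4(a, b), a @ b)
    v = np.random.rand(4)
    assert np.allclose(matmul4x1(a, v), a @ v)
    print('matmul helpers OK')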
def getTranslation(proj_mat, R, bbox2D, bbox3D):
x_min, y_min, x_max, y_max = bbox2D
w, h, l = bbox3D
dx, dy, dz = w / 2., l / 2., h / 2.
measure_max = -9999.
trans_final = np.zeros(4)
xmin_set_list = [[[-dx, -dy, -dz], [-dx, -dy, dz]], [[-dx, dy, -dz], [-dx, dy, dz]]]
xmax_set_list = [[[dx, dy, -dz], [dx, dy, dz]], [[dx, -dy, dz], [dx, -dy, -dz]]]
# ymin_set_list = [[[-dx, -dy, dz], [dx, -dy, dz]], [[-dx, dy, dz], [dx, dy, dz]]]
# ymax_set_list = [[[-dx, dy, -dz], [dx, dy, -dz]], [[-dx, -dy, -dz], [dx, -dy, -dz]]]
ymin_set_list = [[[-dx, -dy, dz], [dx, -dy, dz], [-dx, dy, dz], [dx, dy, dz]]]
ymax_set_list = [[[-dx, dy, -dz], [dx, dy, -dz], [-dx, -dy, -dz], [dx, -dy, -dz]]]
A0_set_list, A1_set_list, A2_set_list, A3_set_list = [], [], [], []
B0_set_list, B1_set_list, B2_set_list, B3_set_list = [], [], [], []
for xmin_set in xmin_set_list + xmax_set_list:
A0_set, B0_set = [], []
for d_xmin in xmin_set:
A0 = np.concatenate([np.identity(3), np.reshape(matmul3x1(R, d_xmin), (3, 1))], axis=-1)
A0 = np.concatenate([A0, np.reshape([0, 0, 0, 1], (1, 4))], axis=0)
A0 = matmul4x4(proj_mat, A0)
B0_set.append(A0)
A0 = A0[0, :] - x_min * A0[2, :]
A0_set.append(A0)
A0_set_list.append(A0_set)
B0_set_list.append(B0_set)
for xmax_set in xmax_set_list + xmin_set_list:
A2_set, B2_set = [], []
for d_xmax in xmax_set:
A2 = np.concatenate([np.identity(3), np.reshape(matmul3x1(R, d_xmax), (3, 1))], axis=-1)
A2 = np.concatenate([A2, np.reshape([0, 0, 0, 1], (1, 4))], axis=0)
A2 = matmul4x4(proj_mat, A2)
B2_set.append(A2)
A2 = A2[0, :] - x_max * A2[2, :]
A2_set.append(A2)
A2_set_list.append(A2_set)
B2_set_list.append(B2_set)
for ymin_set in ymin_set_list:
A1_set, B1_set = [], []
for d_ymin in ymin_set:
A1 = np.concatenate([np.identity(3), np.reshape(matmul3x1(R, d_ymin), (3, 1))], axis=-1)
A1 = np.concatenate([A1, np.reshape([0, 0, 0, 1], (1, 4))], axis=0)
A1 = matmul4x4(proj_mat, A1)
B1_set.append(A1)
A1 = A1[1, :] - y_min * A1[2, :]
A1_set.append(A1)
A1_set_list.append(A1_set)
B1_set_list.append(B1_set)
for ymax_set in ymax_set_list:
A3_set, B3_set = [], []
for d_ymax in ymax_set:
A3 = np.concatenate([np.identity(3), np.reshape(matmul3x1(R, d_ymax), (3, 1))], axis=-1)
A3 = np.concatenate([A3, np.reshape([0, 0, 0, 1], (1, 4))], axis=0)
A3 = matmul4x4(proj_mat, A3)
B3_set.append(A3)
A3 = A3[1, :] - y_max * A3[2, :]
A3_set.append(A3)
A3_set_list.append(A3_set)
B3_set_list.append(B3_set)
for A0_set, B0_set, A2_set, B2_set in zip(A0_set_list, B0_set_list, A2_set_list, B2_set_list):
for A1_set, B1_set, A3_set, B3_set in zip(A1_set_list, B1_set_list, A3_set_list, B3_set_list):
for A0, B0 in zip(A0_set, B0_set):
for A1, B1 in zip(A1_set, B1_set):
for A2, B2 in zip(A2_set, B2_set):
for A3, B3 in zip(A3_set, B3_set):
A = np.stack([A0, A1, A2, A3], axis=0)
# U, S, VH = scipy.linalg.svd(A)
U, S, VH = np.linalg.svd(A, full_matrices=True)
translation = VH[-1, :]
# translation = np.array([1.0, 1.0, 1.0, 1.0])
if translation[-1] * translation[-2] > 0:
translation = translation / translation[-1]
x_min_pred0 = matmul4x1(B0_set[0], translation)
x_min_pred0 = (x_min_pred0[:2] / x_min_pred0[2])[0]
x_min_pred1 = matmul4x1(B0_set[1], translation)
x_min_pred1 = (x_min_pred1[:2] / x_min_pred1[2])[0]
x_min_pred = np.min((x_min_pred0, x_min_pred1))
y_min_pred0 = matmul4x1(B1_set[0], translation)
y_min_pred0 = (y_min_pred0[:2] / y_min_pred0[2])[1]
y_min_pred1 = matmul4x1(B1_set[1], translation)
y_min_pred1 = (y_min_pred1[:2] / y_min_pred1[2])[1]
# y_min_pred = np.min((y_min_pred0, y_min_pred1))
y_min_pred2 = matmul4x1(B1_set[2], translation)
y_min_pred2 = (y_min_pred2[:2] / y_min_pred2[2])[1]
y_min_pred3 = matmul4x1(B1_set[3], translation)
y_min_pred3 = (y_min_pred3[:2] / y_min_pred3[2])[1]
y_min_pred = np.min((y_min_pred0, y_min_pred1, y_min_pred2, y_min_pred3))
x_max_pred0 = matmul4x1(B2_set[0], translation)
x_max_pred0 = (x_max_pred0[:2] / x_max_pred0[2])[0]
x_max_pred1 = matmul4x1(B2_set[1], translation)
x_max_pred1 = (x_max_pred1[:2] / x_max_pred1[2])[0]
x_max_pred = np.max((x_max_pred0, x_max_pred1))
y_max_pred0 = matmul4x1(B3_set[0], translation)
y_max_pred0 = (y_max_pred0[:2] / y_max_pred0[2])[1]
y_max_pred1 = matmul4x1(B3_set[1], translation)
y_max_pred1 = (y_max_pred1[:2] / y_max_pred1[2])[1]
# y_max_pred = np.max((y_max_pred0, y_max_pred1))
y_max_pred2 = matmul4x1(B3_set[2], translation)
y_max_pred2 = (y_max_pred2[:2] / y_max_pred2[2])[1]
y_max_pred3 = matmul4x1(B3_set[3], translation)
y_max_pred3 = (y_max_pred3[:2] / y_max_pred3[2])[1]
y_max_pred = np.max((y_max_pred0, y_max_pred1, y_max_pred2, y_max_pred3))
# if y_min<y_min_pred and y_max>y_max_pred:
if x_min_pred < x_max_pred and y_min_pred < y_max_pred:
# if x_min_pred>=x_min and x_max_pred<=x_max:
# if y_min_pred>=y_min and y_max_pred<=y_max:
bbox2D_pred_area = (x_max_pred - x_min_pred) * (y_max_pred - y_min_pred)
bbox2D_gt_area = (x_max - x_min) * (y_max - y_min)
                                        x_min_inter, x_max_inter = np.max((x_min_pred, x_min)), np.min((x_max_pred, x_max))
                                        # Source dump truncated here; the assumed remainder computes the
                                        # y-overlap, scores the candidate (e.g. by 2D IoU) and keeps the
                                        # best translation in (measure_max, trans_final).
# pylint: disable=missing-module-docstring
import numpy as np
import scipy.stats as ss
from scipy import linalg
class CampbellBacktesting:
"""
This class implements the Haircut Sharpe Ratios and Profit Hurdles algorithms described in the following paper:
`<NAME> and <NAME>, Backtesting, (Fall 2015). Journal of Portfolio Management,
2015 <https://papers.ssrn.com/abstract_id=2345489>`_; The code is based on the code provided by the authors of the paper.
The Haircut Sharpe Ratios algorithm lets the user adjust the observed Sharpe Ratios to take multiple testing into account
and calculate the corresponding haircuts. The haircut is the percentage difference between the original Sharpe ratio
and the new Sharpe ratio.
The Profit Hurdle algorithm lets the user calculate the required mean return for a strategy at a given level of
significance, taking multiple testing into account.
"""
def __init__(self, simulations=2000):
"""
Set the desired number of simulations to make in Haircut Sharpe Ratios or Profit Hurdle algorithms.
:param simulations: (int) Number of simulations
"""
self.simulations = simulations
@staticmethod
def _sample_random_multest(rho, n_trails, prob_zero_mean, lambd, n_simulations, annual_vol=0.15, n_obs=240):
"""
Generates empirical p-value distributions.
The algorithm is described in the paper and is based on the model estimated by `<NAME>., <NAME>,
and <NAME>., … and the Cross-section of Expected Returns. Review of Financial Studies, forthcoming 2015`,
referred to as the HLZ model.
        It provides a set of simulated t-statistics based on the parameters received from the _parameter_calculation
method.
Researchers propose a structural model to capture trading strategies’ underlying distribution.
With probability p0 (prob_zero_mean), a strategy has a mean return of zero and therefore comes
from the null distribution. With probability 1 – p0, a strategy has a nonzero mean and therefore
comes from the alternative distribution - exponential.
:param rho: (float) Average correlation among returns
:param n_trails: (int) Total number of trials inside a simulation
:param prob_zero_mean: (float) Probability for a random factor to have a zero mean
:param lambd: (float) Average of monthly mean returns for true strategies
:param n_simulations: (int) Number of rows (simulations)
:param annual_vol: (float) HLZ assume that the innovations in returns follow a normal distribution with a mean
of zero and a standard deviation of ma = 15%
:param n_obs: (int) Number of observations of used for volatility estimation from HLZ
:return: (np.ndarray) Array with distributions calculated
"""
# Assumed level of monthly volatility = adjusted yearly volatility
monthly_volatility = annual_vol / 12 ** (1 / 2)
# Creating a correlation matrix of simulated returns. All correlations are assumed to be the same as average
# correlation among returns
# The first row of the correlation matrix: [1, rho, rho, .., rho]
correlation_vector = np.insert(rho * np.ones((1, n_trails - 1)), 0, 1)
# Correlation matrix created from the vector by expanding it
correlation_matrix = linalg.toeplitz(correlation_vector)
# Vector with mean of simulated returns - zeros
mean = np.zeros(n_trails)
# Creating a sample from a multivariate normal distribution as returns simulations
# Covariance matrix - Created from correlation matrix multiplied by monthly volatility and adjusted
covariance_matrix = correlation_matrix * (monthly_volatility ** 2 / n_obs)
# Result - n_simulations rows with n_trails inside
shock_mat = np.random.multivariate_normal(mean, covariance_matrix, n_simulations)
# Sample of uniform distribution with the same dimensions as shock_mat
prob_vec = np.random.uniform(0, 1, (n_simulations, n_trails))
# Sample of exponential distribution with same dimensions ad shock_mat
mean_vec = np.random.exponential(lambd, (n_simulations, n_trails))
# Taking the factors that have non-zero mean
nonzero_mean = prob_vec > prob_zero_mean
# Generating the null hypothesis - either zero mean or from an exponential distribution
mu_null = np.multiply(nonzero_mean, mean_vec)
# Matrix of p-value distributions
tstat_matrix = abs(mu_null + shock_mat) / (monthly_volatility / n_obs ** (1 / 2))
return tstat_matrix
@staticmethod
def _parameter_calculation(rho):
"""
Estimates the parameters used to generate the distributions in _sample_random_multest - the HLZ model.
Based on the work of HLZ, the pairwise correlation of returns is used to estimate the probability (prob_zero_mean),
total number of trials (n_simulations) and (lambd) - parameter of the exponential distribution. Levels and
parameters taken from the HLZ research.
:param rho: (float) Average correlation coefficient between strategy returns
:return: (np.array) Array of parameters
"""
# Levels of parameters based on rho. [rho, n_simulations, prob_zero_mean, lambd]
parameter_levels = np.array([[0, 1295, 3.9660 * 0.1, 5.4995 * 0.001],
[0.2, 1377, 4.4589 * 0.1, 5.5508 * 0.001],
[0.4, 1476, 4.8604 * 0.1, 5.5413 * 0.001],
[0.6, 1773, 5.9902 * 0.1, 5.5512 * 0.001],
[0.8, 3109, 8.3901 * 0.1, 5.5956 * 0.001]])
# Linear interpolation for parameter estimates
if (rho < 0):
parameters = parameter_levels[1] # Set at the preferred level if rho is misspecified
elif (rho < 0.2):
parameters = ((0.2 - rho) / 0.2) * parameter_levels[0] + ((rho - 0) / 0.2) * parameter_levels[1]
elif (rho < 0.4):
parameters = ((0.4 - rho) / 0.2) * parameter_levels[1] + ((rho - 0.2) / 0.2) * parameter_levels[2]
elif (rho < 0.6):
parameters = ((0.6 - rho) / 0.2) * parameter_levels[2] + ((rho - 0.4) / 0.2) * parameter_levels[3]
elif (rho < 0.8):
parameters = ((0.8 - rho) / 0.2) * parameter_levels[3] + ((rho - 0.6) / 0.2) * parameter_levels[4]
elif (rho < 1.0): # Interpolation based on the previous level here
parameters = ((0.8 - rho) / 0.2) * parameter_levels[3] + ((rho - 0.6) / 0.2) * parameter_levels[4]
else:
parameters = parameter_levels[1] # Set at the preferred level if rho is misspecified
return parameters
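    # --- Hypothetical illustration (not in the original source) ---
    # The interpolation above is linear in rho between adjacent HLZ rows,
    # e.g. _parameter_calculation(0.3) returns
    # 0.5 * parameter_levels[1] + 0.5 * parameter_levels[2].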
@staticmethod
def _annualized_sharpe_ratio(sharpe_ratio, sampling_frequency='A', rho=0, annualized=False,
autocorr_adjusted=False):
"""
Calculate the equivalent annualized Sharpe ratio after taking the autocorrelation of returns into account.
Adjustments are based on the work of `<NAME>., The Statistics of Sharpe Ratios. Financial Analysts Journal,
58 (2002), pp. 36-52` and are described there in more detail.
:param sharpe_ratio: (float) Sharpe ratio of the strategy
:param sampling_frequency: (str) Sampling frequency of returns
['D','W','M','Q','A'] = [Daily, Weekly, Monthly, Quarterly, Annual]
:param rho: (float) Autocorrelation coefficient of returns at specified frequency
:param annualized: (bool) Flag if annualized, 'ind_an' = 1, otherwise = 0
:param autocorr_adjusted: (bool) Flag if Sharpe ratio was adjusted for returns autocorrelation
:return: (float) Adjusted annualized Sharpe ratio
"""
# If not annualized, calculating the appropriate multiplier for the Sharpe ratio
if sampling_frequency == 'D':
times_per_year = 360
elif sampling_frequency == 'W':
times_per_year = 52
elif sampling_frequency == 'M':
times_per_year = 12
elif sampling_frequency == 'Q':
times_per_year = 4
elif sampling_frequency == 'A':
times_per_year = 1
else:
times_per_year = 1 # Misspecified
if not annualized:
annual_multiplier = times_per_year ** (1 / 2)
else:
annual_multiplier = 1
# If not adjusted for returns autocorrelation, another multiplier
if not autocorr_adjusted:
autocorr_multiplier = (1 + (2 * rho / (1 - rho)) * (1 - ((1 - rho ** (times_per_year)) /
(times_per_year * (1 - rho))))) ** (-0.5)
else:
autocorr_multiplier = 1
# And calculating the adjusted Sharpe ratio
adjusted_sr = sharpe_ratio * annual_multiplier * autocorr_multiplier
return adjusted_sr
@staticmethod
def _monthly_observations(num_obs, sampling_frequency):
"""
Calculates the number of monthly observations based on sampling frequency and number of observations.
:param num_obs: (int) Number of observations used for modelling
:param sampling_frequency: (str) Sampling frequency of returns
['D','W','M','Q','A'] = [Daily, Weekly, Monthly, Quarterly, Annual]
:return: (np.float64) Number of monthly observations
"""
# N - Number of monthly observations
if sampling_frequency == 'D':
monthly_obs = np.floor(num_obs * 12 / 360)
elif sampling_frequency == 'W':
monthly_obs = np.floor(num_obs * 12 / 52)
elif sampling_frequency == 'M':
monthly_obs = np.floor(num_obs * 12 / 12)
elif sampling_frequency == 'Q':
monthly_obs = np.floor(num_obs * 12 / 4)
elif sampling_frequency == 'A':
monthly_obs = np.floor(num_obs * 12 / 1)
else: # If the frequency is misspecified
monthly_obs = np.floor(num_obs)
return monthly_obs
@staticmethod
def _holm_method_sharpe(all_p_values, num_mult_test, p_val):
"""
        Runs one cycle of the Holm method for the Haircut Sharpe ratio algorithm.
:param all_p_values: (np.array) Sorted p-values to adjust
:param num_mult_test: (int) Number of multiple tests allowed
:param p_val: (float) Significance level p-value
:return: (np.float64) P-value adjusted at a significant level
"""
# Array for final p-values of the Holm method
p_holm_values = np.array([])
# Iterating through multiple tests
for i in range(1, (num_mult_test + 2)):
# Creating array for Holm adjusted p-values (M-j+1)*p(j) in the paper
p_adjusted_holm = np.array([])
# Iterating through the available subsets of Holm adjusted p-values
for j in range(1, i + 1):
# Holm adjusted p-values
p_adjusted_holm = np.append(p_adjusted_holm, (num_mult_test + 1 - j + 1) * all_p_values[j - 1])
# Calculating the final p-values of the Holm method and adding to an array
p_holm_values = np.append(p_holm_values, min(max(p_adjusted_holm), 1))
# Getting the Holm adjusted p-value that is significant at our p_val level
p_holm_significant = p_holm_values[all_p_values == p_val]
p_holm_result = p_holm_significant[0]
return p_holm_result
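    # --- Hypothetical illustration (not in the original source) ---
    # Step-down Holm on toy inputs: with sorted p-values [0.01, 0.04] and
    # num_mult_test = 1, the adjusted values are min(2*0.01, 1) = 0.02 and
    # min(max(2*0.01, 1*0.04), 1) = 0.04, so
    # _holm_method_sharpe(np.array([0.01, 0.04]), 1, 0.04) -> 0.04.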
@staticmethod
def _bhy_method_sharpe(all_p_values, num_mult_test, p_val):
"""
        Runs one cycle of the BHY method for the Haircut Sharpe ratio algorithm.
:param all_p_values: (np.array) Sorted p-values to adjust
:param num_mult_test: (int) Number of multiple tests allowed
:param p_val: (float) Significance level p-value
:param c_constant: (float) Constant used in BHY method
:return: (np.float64) P-value adjusted at a significant level
"""
# Array for final p-values of the BHY method
p_bhy_values = np.array([])
# BHY constant
index_vector = np.arange(1, num_mult_test + 1)
c_constant = sum(1 / index_vector)
# Iterating through multiple tests backwards
for i in range(num_mult_test + 1, 0, -1):
if i == (num_mult_test + 1): # If it's the last observation
# The p-value stays the same
p_adjusted_holm = all_p_values[-1]
else: # If it's the previous observations
# The p-value is adjusted according to the BHY method
p_adjusted_holm = min(((num_mult_test + 1) * c_constant / i) * all_p_values[i - 1], p_previous)
# Adding the final BHY method p-values to an array
p_bhy_values = np.append(p_adjusted_holm, p_bhy_values)
p_previous = p_adjusted_holm
# Getting the BHY adjusted p-value that is significant at our p_val level
p_bhy_significant = p_bhy_values[all_p_values == p_val]
p_bhy_result = p_bhy_significant
return p_bhy_result
@staticmethod
def _sharpe_ratio_haircut(p_val, monthly_obs, sr_annual):
"""
Calculates the adjusted Sharpe ratio and the haircut based on the final p-value of the method.
:param p_val: (float) Adjusted p-value of the method
:param monthly_obs: (int) Number of monthly observations
:param sr_annual: (float) Annualized Sharpe ratio to compare to
:return: (np.array) Elements (Adjusted annual Sharpe ratio, Haircut percentage)
"""
# Inverting to get z-score of the method
z_score = ss.t.ppf(1 - p_val / 2, monthly_obs - 1)
# Adjusted annualized Sharpe ratio of the method
sr_adjusted = (z_score / monthly_obs ** (1 / 2)) * 12 ** (1 / 2)
# Haircut of the Sharpe ratio of the method
haircut = (sr_annual - sr_adjusted) / sr_annual * 100
return (sr_adjusted, haircut)
@staticmethod
def _holm_method_returns(p_values_simulation, num_mult_test, alpha_sig):
"""
Runs one cycle of the Holm method for the Profit Hurdle algorithm.
:param p_values_simulation: (np.array) Sorted p-values to adjust
:param num_mult_test: (int) Number of multiple tests allowed
:param alpha_sig: (float) Significance level (e.g., 5%)
:return: (np.float64) P-value adjusted at a significant level
"""
# Array for adjusted significance levels
        sign_levels = np.zeros(num_mult_test)
        # Source dump truncated here; the assumed remainder fills
        # sign_levels with the Holm step-down thresholds
        # alpha_sig / (num_mult_test + 1 - j) and returns the statistic
        # implied by the first sorted p-value exceeding its threshold.
# 1.0 IMPORTING LIBRARIES -------------------------------------------------------------------
#region
import pandas as pd
import numpy as np
import os
import re
import glob
import csv
import shutil
print('Libraries Imported.')
#endregion
# 2 READ DATA----------------------------------------------------------------------
#region
# 2.1 Read a Single CSV file
#region
# Directory folder of the csv files you want to process
filename = r'C:\FILES\Hansen-Data-Qualified8.csv'
print('Data will load from: ', filename)
# Can change to xlsx if needed, other changes will be nessesary to code
Extension = 'csv'
# Csv files seperator for input and output files..generally (,) or (|)
DeLimiter = ','
print('Directories loaded.')
print('Loading Data...')
# Code
df_data = pd.read_csv(filename, sep=DeLimiter, engine='python', dtype=str)
print('-------------SHAPE--------------')
print(df_data.shape)
print('----------DATAFRAME-------------')
print(df_data.head(1))
print('--------------------------------')
print('Dataframe Loaded.')
#End 2.1
#endregion
# 2.2 Read a Single XLSX file
#region
#End 2.2
#endregion
#End 2
#endregion
# 3 VIEWING ITEMS IN COLUMNS -----------------------------------------------------------
#region
# 3.1 Viewing a list of all the unique items in a column
#region
# Input Params
Column_Name_To_Check3 = 'SPOTCODE'
# Create Array of Unique Items
# Swap the name of the column to rename
df_data.rename(columns={Column_Name_To_Check3: 'coltocheck'}, inplace=True)
# Check Data
Unique_Array = df_data.coltocheck.unique()
Unique_Array.sort()
# Swap back the name of the column to rename
df_data.rename(columns={'coltocheck': Column_Name_To_Check3}, inplace=True)
#export the Unique Items Array
Output_Loc_Filname = r'C:\FILES\UniqueCodes2.csv'
pd.DataFrame(Unique_Array).to_csv(Output_Loc_Filname)
np.savetxt(Output_Loc_Filname , Unique_Array, delimiter=',', fmt='%s')
#End-3.1
#endregion
# 3.2 Comparting Rows in a Column with items in a lookup table
#region
Data_Column_Name= 'SPOTCODE-E'
Create_Unmapped_CSV = 'y'
Lookup_Table_Dir = r'C:\FILES\Hansen-Methods-In-EnviroSys.xlsx'
Sheet_To_Load = 'Data'
Lookup_Column_Name_To_Check = 'Method Short Name'
Output_Loc_Filname = r'C:\FILES\UniqueCodesUnmapped5.csv'
#Load Lookup Table
df_lookup = pd.read_excel(Lookup_Table_Dir ,
sheet_name = Sheet_To_Load,
dtype=object)
print('loaded lookup table.')
print('Rows, Columns:')
print(df_lookup.shape)
# Delete All columns in lookup table except
Cols_Dont_Delete = [Lookup_Column_Name_To_Check]
df_lookup.drop(df_lookup.columns.difference(Cols_Dont_Delete), axis=1, inplace=True)
print('Columns Deleted.')
print('Rows, Columns:')
print(df_lookup.shape)
print('Generating Bools Filter...')
Bools_Mapping_Series = df_data[Data_Column_Name].isin(df_lookup[Lookup_Column_Name_To_Check])
print('Bools Generated.')
print('Creating dataframe with filtered data df_data2...')
df_data2 = df_data[Bools_Mapping_Series]
df_deleted = df_data[~Bools_Mapping_Series]
print('Data Filtered and now in df_data2')
df_deleted.rename(columns={Data_Column_Name: 'coltocheck'}, inplace=True)
Unique_Array_Unmapped = df_deleted.coltocheck.unique()
Unique_Array_Unmapped.sort()
print('Created ndarray of unmapped items called: Unique_Array_Unmapped')
df_data.rename(columns={'coltocheck': Data_Column_Name}, inplace=True)
#export the Unique Items Array to a CSV
if Create_Unmapped_CSV == 'y':
print('Exporting Unmapped Items to a CSV...')
pd.DataFrame(Unique_Array_Unmapped).to_csv(Output_Loc_Filname)
np.savetxt(Output_Loc_Filname , Unique_Array_Unmapped, delimiter=',', fmt='%s')
print('Unique Unmapped CSV created.')
print('See Location: ', Output_Loc_Filname)
print('DONE!')
print('------------------------------------')
#End-3.2
#endregion
#End-3
#endregion
# 5 DATA WRANGLING
#region
# 5.1 Deleting Rows
#region
# 5.1.1 Deleting Rows with everything missing in the row
#region
# Checking number of rows
rows_count = df_data.shape[0]
# Dropping rows
df_data2 = df_data.dropna(axis='index', how='all')
df_data = df_data2
# Checking number of rows removed
rows_deleted = df_data.shape[0] - rows_count
# Printing rows removed
print("Number of Rows Deleted =", rows_deleted)
#endregion
# 5.1.2 Deleting Rows with missing info in certain columns
#region
# Input Parameters
Cols_To_Check = ['WONO', 'SPOTVAL', 'ADDDTTM']
#note 'any' will delete a row if any of the columns is mssing data
#note 'all' will only deleter a row if all the columns above are NA.
How_to_delete = 'any'
# Checking number of rows
rows_count = df_data.shape[0]
# Dropping rows
df_data2 = df_data.dropna(axis='index', how = How_to_delete, subset = Cols_To_Check)
df_data = df_data2
df_var_checker = df_data.head(5)
# Checking number of rows removed
rows_deleted = df_data.shape[0] - rows_count
# Printing rows removed
print("Number of Rows Deleted =", rows_deleted)
#endregion
# 5.1.3 Deleting Rows matching a certain string in a certain column
#region
Column_Name_To_Check = "SPOTVAL"
String_To_Check = ' '
# Bools_With_String = df_data[Column_Name_To_Check].str.contains(String_To_Check)
# Bools_With_String = df_data[Column_Name_To_Check].str.startswith(String_To_Check)
Bools_With_String = df_data[Column_Name_To_Check].str.match(String_To_Check)
Bools_Without_String = ~Bools_With_String
df_data2 = df_data[Bools_Without_String]
Row_Difference = df_data.shape[0] - df_data2.shape[0]
df_data = df_data2
print('Rows Deleted: ', Row_Difference)
'''Work in progress...
Strings_To_Check = [' ', ' ']
Columns_To_Check = ['WONO']
df_data2 = df_data.filter(axis='index', items = Columns_To_Check, regex=' ')
'''
#endregion
# 5.1.4 Deleting and/or Checking Rows that don't map to items in a lookup table
#region
Delete_Unmapped = 'n'
Create_Unmapped_CSV = 'y'
Lookup_Table_Dir = r'C:\FILES\Hansen-Methods-In-EnviroSys.xlsx'
Output_Loc_Filname = r'C:\FILES\UniqueCodesUnmapped5.csv'
Sheet_To_Load = 'Data'
Lookup_Column_Name_To_Check = 'Method Short Name'
Column_Name_To_Apply_Deletions = 'SPOTCODE-E'
#Load Lookup Table
df_lookup = pd.read_excel(Lookup_Table_Dir ,
sheet_name = Sheet_To_Load,
dtype=object)
print('loaded lookup table.')
print('Rows, Columns:')
print(df_lookup.shape)
# Delete All columns except
Cols_Dont_Delete = [Lookup_Column_Name_To_Check]
df_lookup.drop(df_lookup.columns.difference(Cols_Dont_Delete), axis=1, inplace=True)
print('Columns Deleted.')
print('Rows, Columns:')
print(df_lookup.shape)
print('Generating Bools Filter...')
Bools_Mapping_Series = df_data[Column_Name_To_Apply_Deletions].isin(df_lookup[Lookup_Column_Name_To_Check])
print('Bools Generated.')
df_data2 = df_data[Bools_Mapping_Series]
df_deleted = df_data[~Bools_Mapping_Series]
print('Data Filtered and now in df_data2')
df_deleted.rename(columns={Column_Name_To_Apply_Deletions: 'coltocheck'}, inplace=True)
Unique_Array_Unmapped = df_deleted.coltocheck.unique()
Unique_Array_Unmapped.sort()
print('Created ndarray of unmapped items called: Unique_Array_Unmapped')
df_data.rename(columns={'coltocheck': Column_Name_To_Apply_Deletions}, inplace=True)
#export the Unique Items Array to a CSV
if Create_Unmapped_CSV == 'y':
print('Exporting Unmapped Items to a CSV...')
pd.DataFrame(Unique_Array_Unmapped).to_csv(Output_Loc_Filname)
np.savetxt(Output_Loc_Filname , Unique_Array_Unmapped, delimiter=',', fmt='%s')
print('Unique Unmapped CSV created.')
print('See Location: ', Output_Loc_Filname)
# Effectively Deleting Unmapped Items if requested
if Delete_Unmapped == 'y':
print('deleting Unmapped Items from df_data')
df_data = df_data2
else:
print('Original Data is untouched in df_data and filtered data is stored in df_data2')
#End 5.1.4
#endregion
#End 5.1
#endregion
# 5.2 Delete Columns
#region
Cols_To_Delete = [
'WONO',
'SPOTCODE',
]
df_data.drop(Cols_To_Delete, axis=1, inplace=True)
df_data.head()
#endregion
# 5.3 Concatenate Columns to Create Keys
#region
Name_of_New_Col = 'KEY'
Cols_To_Join = ['SPOTCODE', 'UM', 'DESCRIPT']
df_data = df_data.astype(str)
df_data[Name_of_New_Col] = df_data[Cols_To_Join].apply(lambda x: '-'.join(x.map(str)), axis=1)
df_data.head()
#endregion
# 5.4 Swapping Data in a Column based on a csv lookup table
#region
# Input Params
#region
# Column to apply the Swapping to
ColName = 'WONO'
# Directory file of Lookup Table for swapping
# Note: must have two columns named 'FIND' and 'REPLACE'
Input_path_Lookup = 'C:/FILES/find-replace.csv'
#endregion
# Read and process the lookup table
#region
df_lookup = pd.read_csv(Input_path_Lookup, dtype={'FIND': object, 'REPLACE': object})
# Delete Rows with everything missing in the row
df_lookup = df_lookup.dropna(axis='index', how='all')
# Delete non Unique (duplicate) FIND rows
df_lookup.drop_duplicates(subset='FIND', keep=False, inplace=True)
# Create a list of Unique Find items
List_Subs = df_lookup['FIND'].tolist()
# Change index to FIND
df_lookup.set_index('FIND', inplace = True)
dict_Subs = df_lookup.to_dict()
dict_Subs = dict_Subs.get('REPLACE')
print('SUBSTITUTIONS...')
print(dict_Subs)
#endregion
# Apply swap changes to dataframe
#region
# Swap the name of the column to rename
df_data.rename(columns={ColName: 'coltoswapxy'}, inplace=True)
# Make the replacements (on the column, keeping the full DataFrame intact)
df_data['coltoswapxy'] = df_data['coltoswapxy'].replace(dict_Subs)
# Swap back the name of the column to rename
df_data.rename(columns={'coltoswapxy': ColName}, inplace=True)
print(df_data.head())
print('DONE SWAPPING')
#endregion
#endregion
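# --- Hypothetical alternative (not in the original source) ---
# Series.map with a fillna fallback performs the same swap and is often
# faster than replace() on large frames:
# df_data[ColName] = df_data[ColName].map(dict_Subs).fillna(df_data[ColName])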
# 5.5 Sorting the Table
#region
Columns_Sort_Order = ['WONO-E', 'SPOTCODE-E']
df_data.sort_values(by = Columns_Sort_Order, inplace=True)
print(df_data.head())
#endregion
# 5.6 Editing Data
#region
# 5.6.1 Delete spaces from start and end of a cell swap to 'lstrip' or 'rstrip' if required
#region
Column_To_Edit = 'SPOTCODE'
Delete_Original_Column = 'n'
Edited_Col_Name = Column_To_Edit + '-E'
df_data.rename(columns={Column_To_Edit: 'editingthiscolumn'}, inplace=True)
New_Col_Edited = df_data.editingthiscolumn.str.strip()
df_data[Edited_Col_Name] = New_Col_Edited
df_data.rename(columns={'editingthiscolumn': Column_To_Edit}, inplace=True)
if Delete_Original_Column == 'y':
df_data.drop(Column_To_Edit, axis=1, inplace=True)
df_data.rename(columns={Edited_Col_Name: Column_To_Edit}, inplace=True)
#End-5.6.1
#endregion
# 5.6.2 Delete a character from the end of a cell
#region
Column_To_Edit = 'WONO-E'
Delete_Original_Column = 'y'
String_To_Strip = '/'
Edited_Col_Name = Column_To_Edit + '-E'
df_data.rename(columns={Column_To_Edit: 'editingthiscolumn'}, inplace=True)
New_Col_Edited = df_data.editingthiscolumn.str.rstrip(String_To_Strip)
df_data[Edited_Col_Name] = New_Col_Edited
df_data.rename(columns={'editingthiscolumn': Column_To_Edit}, inplace=True)
if Delete_Original_Column == 'y':
df_data.drop(Column_To_Edit, axis=1, inplace=True)
df_data.rename(columns={Edited_Col_Name: Column_To_Edit}, inplace=True)
#endregion
#End-5.6.2
# 5.6.5 Adding a string to the start of a row based on condition
#region
String_To_Check = 'SPT0'
Column_Name_To_Check = 'WONO-E'
String_To_Add = 'HANSEN-'
# Get Indexes of Rows
df_data.reset_index(drop=True)
Bools_With_String = df_data[Column_Name_To_Check].str.startswith(String_To_Check)
Index_Array = Bools_With_String[Bools_With_String].index.values
# Edit Cells
for item in Index_Array:
x = df_data.at[item, Column_Name_To_Check]
new_string = String_To_Add + str(x)
df_data.at[item, Column_Name_To_Check] = new_string
#End 5.6.5
#endregion
#End-5.6
#endregion
#End-5
#endregion
# 6 EXPORTING DATA
#region
# 6.1 Standard Export
#region
# Input Params
Output_Location = 'C:/FILES/'
Output_filename = 'Hansen-Data-Qualified8'
Output_Extension = '.csv'
Delimiter = ','
# Creating File
FName = Output_Location + Output_filename + Output_Extension
df_data.to_csv(path_or_buf=FName, sep= Delimiter, index=False)
print('---------------------------------------------')
print('EXPORTING CSV DONE')
#endregion
# 6.2 Chuncked Export
#region
#INPUT PARAMS
Out_File_Loc_Name = 'C:/FILES/HCHUNKS/HANSENDB_.csv'
Output_Extension = '.csv'
Delimiter = '|'
ChunckSize = 1000
#CODE
NumToRemove = -1 * (len(Output_Extension))
Output_filename = Out_File_Loc_Name[:NumToRemove]
Full_Filepath = Output_filename + Output_Extension
Row_Max = int((df_data.shape[0])-1)
NoChuncks = Row_Max/ChunckSize
NoChuncksInt = int(NoChuncks)
PartialChunck = NoChuncks - NoChuncksInt
print('Chunking Data...')
if PartialChunck > 0:
Bool_PartialChunck = True
else:
Bool_PartialChunck = False
counter = int(0)
FromRow = 0
ToRow = ChunckSize
while counter <= NoChuncksInt:
df_temp=df_data.iloc[FromRow:ToRow, :]
FromRow = ToRow
ToRow = ToRow + ChunckSize
counter += 1
FName = Output_filename + str(counter) + Output_Extension
df_temp.to_csv(path_or_buf=FName, sep=Delimiter, index=False)
if Bool_PartialChunck:
counter += 1
FName = Output_filename + str(counter) + Output_Extension
df_temp=df_data.iloc[FromRow::, :]
df_temp.to_csv(path_or_buf=FName, sep=Delimiter, index=False)
print('EXPORTED CHUNKING DONE!')
#endregion
#endregion
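# --- Hypothetical alternative (not in the original source) ---
# np.array_split handles the partial final chunk automatically and avoids
# the manual row arithmetic above:
# for i, df_part in enumerate(np.array_split(df_data, max(1, NoChuncksInt + 1)), 1):
#     df_part.to_csv(Output_filename + str(i) + Output_Extension, sep=Delimiter, index=False)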
# 7 Bulk edit csv files
# 7.1 Full MW cleanse and move
#region
# INPUT VARIABLES----------------------------------------------------------------------------------------
#region
# Directory folder of the csv files you want to process
Input_path_CSVs = 'C:/FILES/Input_CSV/'
# Can change to xlsx if needed, other changes will be nessesary to code
Extension = 'csv'
# Csv files seperator for input and output files..generally (,) or (|)
Delimiter = '|'
# Directory folder of the TKC cross reference table
Input_path_TKC_files = 'D:/FILES/Input_TKC_files/'
# Directory excel file of the Sample Point Table
Input_path_SPT = 'C:/FILES/Sample Points_37883_20180926_134607.xlsx'
# Output folder of the CSV files
Output_path_processed_csv = 'C:/FILES/Output_CSV_Processed/'
# Output folder path of bad SPT CSV files
Output_path_badSPT = 'C:/FILES/Output_CSV_Bad_SPT/'
# Output folder path of TKC Unmapped Data
Output_path_badTKC = 'C:/FILES/Output_CSV_Bad_TKC/'
# Output folder path of Retest CSV files
Output_path_Retests = 'C:/FILES/Output_CSV_Retests/'
# Output folder path of CSV Files with Structure that can't be Analysed
Output_path_bad_structure = 'C:/FILES/Output_CSV_Bad_Column_Structure/'
# Output folder path of Report on Analysed files
Output_path_Report = 'C:/FILES/'
print('Directories loaded...')
#endregion
# READ AND PROCESS THE UNIQUE SAMPLE POINTS FILE----------------------------------------------------------------
#region
df_SPTs = pd.read_excel(Input_path_SPT, sheet_name='Data', dtype={'Name': object, 'OldSiteCode_post2007': object})
List_Columns_Keep = ['Name','OldSiteCode_post2007']
df_SPTs = df_SPTs[List_Columns_Keep]
df_SPTs.columns = ['SPT', 'OSC']
# Remove ESP points
df_SPTs = df_SPTs[~df_SPTs['SPT'].astype(str).str.startswith('ESP')]
# Delete non Unique (duplicate) OSC's
df_SPTs.drop_duplicates(subset='OSC', keep=False, inplace=True)
# Create a list of Unique OSC's
List_OSCs = df_SPTs['OSC'].tolist()
# Change index to old site code
df_SPTs.set_index('OSC', inplace = True)
dict_SPTs = df_SPTs.to_dict()
dict_SPTs = dict_SPTs.get('SPT')
#print('SPT Cross Reference Table created...')
#endregion
# READ AND PROCESS THE TKC FILES-------------------------------------------------------------------------
#region
os.chdir(Input_path_TKC_files)
filenames = [i for i in glob.glob('*.{}'.format('xlsx'))]
List_Columns_Keep = ['Test Key Code (TKC)','Valid', 'Data Type']
bool_df_created = False
for filename in filenames:
if bool_df_created == False:
df_TKCs = pd.read_excel(filename, sheet_name='Data', dtype={'Test Key Code (TKC)': object})
bool_df_created = True
df_TKCs = df_TKCs[List_Columns_Keep]
df_TKCs.columns = ['TKC', 'Valid','DT']
else:
df_temp = pd.read_excel(filename, sheet_name='Data', dtype={'Test Key Code (TKC)': object})
df_temp = df_temp[List_Columns_Keep]
df_temp.columns = ['TKC', 'Valid','DT']
df_TKCs = df_TKCs.append(df_temp)
# Remove Biosolids Monitoring TKC's from Dataframe and delete DT column
df_TKCs = df_TKCs[~df_TKCs['DT'].astype(str).str.startswith('Bio')]
df_TKCs.drop('DT', axis=1, inplace=True)
# Remove Invalid TKC's from Dataframe and delete Valid column
df_TKCs = df_TKCs[~df_TKCs['Valid'].astype(str).str.startswith('N')]
df_TKCs.drop('Valid', axis=1, inplace=True)
# Delete non Unique (duplicate) TKC's
df_TKCs.drop_duplicates(subset='TKC', keep=False, inplace=True)
#Create List of Mapped TKCs
List_TKCs = df_TKCs['TKC'].tolist()
print('Mapped TKC List created...')
#endregion
# SAVE CSV FILENAMES IN A LIST AND DATAFRAME-------------------------------------------------------------
#region
# Get the csv filenames into an array
os.chdir(Input_path_CSVs)
filenames = [i for i in glob.glob('*.{}'.format(Extension))]
# Get the number of csv files
NumFiles = len(filenames)
print(NumFiles, 'csv files found...')
#endregion
# MOVE FILES WITHOUT 'LOCATIONCODE' or 'LocationDescription' --------------------------------------------
#region
counter_good_files = 0
counter_bad_files = 0
List_Unsupported_Files = []
for filename in filenames:
# Save an individual file as a DataFrame Object to analyse
try:
df_file = pd.read_csv(filename, sep=Delimiter, index_col=False, engine='python')
if ('LOCATIONCODE' in df_file.columns) and ('LocationDescription' in df_file.columns):
counter_good_files +=1
else:
List_Unsupported_Files.append(filename)
counter_bad_files +=1
except:
List_Unsupported_Files.append(filename)
counter_bad_files +=1
# Print stats
print('Number of Files that can be Analysed:', counter_good_files)
print("Number of Files that can't be Analysed:", counter_bad_files)
# Move files
files = os.listdir(Input_path_CSVs)
for f in files:
if f in List_Unsupported_Files:
shutil.move(f, Output_path_bad_structure)
# Get the csv filenames into an array after unwanted ones are moved
os.chdir(Input_path_CSVs)
filenames = [i for i in glob.glob('*.{}'.format(Extension))]
#endregion
# CREATE AN EMPTY DATAFRAME FOR REPORT-------------------------------------------------------------------
#region
List_Columns = ['Filename', 'Total Rows', 'Duplicates', 'Retests', 'Rows QA Data', 'No SPT Code', 'Replaced SPT Codes', 'Rows With Unmapped TKCs']
df_Report = pd.DataFrame(columns=List_Columns)
#endregion
# LOOP THOUGH EACH FILE AND PROCESS IT ------------------------------------------------------------------
for filename in filenames:
print('-----------------------------------------------')
print('current file:')
print(filename)
# Set Booleans
QA_Data_In_File = False
Bad_sptz = False
Retests_In_File = False
Duplicates_In_File = False
Bad_TKCs = False
# Set Counts
Int_Total_Rows = int(0)
Int_Bad_SPTs = int(0)
Int_Replaced_SPTs = int(0)
Int_QA_Rows = int(0)
Int_Dup_Rows = int(0)
Int_Retest_Rows = int(0)
Int_Unmapped_TKCs = int(0)
# Save the individual file as a DataFrame Object to analyse
df_file = pd.read_csv(filename, sep=Delimiter, index_col=False, engine='python', dtype={'LOCATIONCODE': object, 'TEST_KEY_CODE': object})
# Delete Rows with everything missing in the row
df_file = df_file.dropna(axis='index', how='all')
Int_Total_Rows = df_file.shape[0]
################### QA DATA DELETION ##################################
# Check and Find if Blanks exist in Location Code Rows
bools_ml = df_file['LOCATIONCODE'].isnull()
bools_ml = np.array(bools_ml)
# Check and Find if/Where Blanks exist in Location Description Rows
bools_md = df_file['LocationDescription'].isnull()
    bools_md = np.array(bools_md)
# Find Quality Assurance Data Rows
Series_Loc_Desc = df_file['LocationDescription']
Series_Loc_Desc = Series_Loc_Desc.fillna(' ')
bools_bd = Series_Loc_Desc == 'Blind Dup A'
bools_bd = np.array(bools_bd)
    bools_bd = np.logical_and(bools_ml, bools_bd)
    # Source dump truncated here; the assumed remainder flags the remaining
    # QA descriptions the same way, deletes the flagged rows, updates the
    # per-file counters and appends a summary row to df_Report.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import warnings
from orangecontrib.wonder.fit.parameters.measured_data.phase import Phase
from orangecontrib.wonder.fit.parameters.initialization.fft_parameters import FFTTypes
from orangecontrib.wonder.fit.parameters.instrument.zero_error import ZeroError
from orangecontrib.wonder.fit.parameters.instrument.lab6_tan_correction import Lab6TanCorrection
from orangecontrib.wonder.fit.parameters.instrument.caglioti import Caglioti
from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters
from orangecontrib.wonder.fit.parameters.instrument.speciment_displacement import SpecimenDisplacement
from orangecontrib.wonder.fit.parameters.thermal.thermal_parameters import ThermalParameters
from orangecontrib.wonder.fit.parameters.instrument.background_parameters import ChebyshevBackground, ExpDecayBackground
from orangecontrib.wonder.fit.parameters.microstructure.strain import InvariantPAH, WarrenModel, KrivoglazWilkensModel
from orangecontrib.wonder.fit.parameters.measured_data.phase_gsasii import GSASIIPhase
from orangecontrib.wonder.fit.functions.gsasii_functions import gsasii_intensity_factor
from orangecontrib.wonder.fit.parameters.additional.pseudo_voigt_peak import SpuriousPeaks
from orangecontrib.wonder.fit.functions.chemical_formula_parser import ChemicalFormulaParser
from orangecontrib.wonder.util.fit_utilities import Utilities, Symmetry
warnings.filterwarnings("ignore", category=RuntimeWarning)
class Distribution:
DELTA = "delta"
LOGNORMAL = "lognormal"
GAMMA = "gamma"
YORK = "york"
@classmethod
def tuple(cls):
return [cls.DELTA, cls.LOGNORMAL, cls.GAMMA, cls.YORK]
class Shape:
NONE = "none"
SPHERE = "sphere"
CUBE = "cube"
TETRAHEDRON = "tetrahedron"
OCTAHEDRON = "octahedron"
CYLINDER = "cylinder"
WULFF = "wulff solid"
@classmethod
def tuple(cls):
return [cls.NONE, cls.SPHERE, cls.CUBE, cls.TETRAHEDRON, cls.OCTAHEDRON, cls.CYLINDER, cls.WULFF]
class WulffCubeFace:
TRIANGULAR = "triangular"
HEXAGONAL = "hexagonal"
@classmethod
def tuple(cls):
return [cls.TRIANGULAR, cls.HEXAGONAL]
class Normalization:
NORMALIZE_TO_N = 0
NORMALIZE_TO_N2 = 1
@classmethod
def tuple(cls):
return ["to N", "to N\u00b2"]
def __H_invariant_square(h, k, l):
numerator = (h * h * k * k + k * k * l * l + l * l * h * h)
denominator = (h**2 + k**2 + l**2)**2
return numerator / denominator
def __merge_functions(list_of_pairs, s):
# x step must be the same for all functions
intensity = numpy.zeros(len(s))
for pair_index in range(list_of_pairs.shape[0]):
intensity += numpy.interp(s, list_of_pairs[pair_index, 0], list_of_pairs[pair_index, 1])
return intensity
#################################################
#
# FIT FUNCTION
#
#################################################
def fit_function_direct(twotheta, fit_global_parameters, diffraction_pattern_index = 0):
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
wavelength = incident_radiation.wavelength.value
I = fit_function_reciprocal(Utilities.s(0.5*numpy.radians(twotheta), wavelength),
fit_global_parameters,
diffraction_pattern_index)
# POLARIZATION FACTOR --------------------------------------------------------------------------------------
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_polarization_factor:
twotheta_mono = polarization_parameters.twotheta_mono
I *= polarization_factor(numpy.radians(twotheta),
None if twotheta_mono is None else numpy.radians(twotheta_mono),
polarization_parameters.degree_of_polarization,
polarization_parameters.beampath)
# ADD BACKGROUNDS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.background_parameters is None:
for key in fit_global_parameters.background_parameters.keys():
background_parameters = fit_global_parameters.get_background_parameters_item(key, diffraction_pattern_index)
if not background_parameters is None:
if key == ChebyshevBackground.__name__:
parameters=[background_parameters.c0.value,
background_parameters.c1.value,
background_parameters.c2.value,
background_parameters.c3.value,
background_parameters.c4.value,
background_parameters.c5.value,
background_parameters.c6.value,
background_parameters.c7.value,
background_parameters.c8.value,
background_parameters.c9.value]
add_chebyshev_background(twotheta, I, parameters)
elif key == ExpDecayBackground.__name__:
add_expdecay_background(twotheta,
I,
parameters=[background_parameters.a0.value,
background_parameters.b0.value,
background_parameters.a1.value,
background_parameters.b1.value,
background_parameters.a2.value,
background_parameters.b2.value])
# ADD PSEUDO VOIGTS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.additional_parameters is None:
for key in fit_global_parameters.additional_parameters.keys():
additional_parameters = fit_global_parameters.get_additional_parameters_item(key, diffraction_pattern_index)
if not additional_parameters is None:
if key == SpuriousPeaks.__name__:
add_pseudo_voigt_peaks(twotheta, I, additional_parameters)
return I
def fit_function_reciprocal(s, fit_global_parameters, diffraction_pattern_index = 0):
line_profile = fit_global_parameters.measured_dataset.get_line_profile(diffraction_pattern_index)
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
# CONSTRUCTION OF EACH SEPARATE PEAK ---------------------------------------------------------------------------
phases_number = fit_global_parameters.measured_dataset.get_phases_number()
separated_phases_intensities = numpy.full((phases_number, 2), None)
for phase_index in range(phases_number):
phase = fit_global_parameters.measured_dataset.get_phase(phase_index)
if not Phase.is_cube(phase.symmetry): raise ValueError("Only Cubic structures are supported by fit")
reflections_number = line_profile.get_reflections_number(phase_index)
separated_peaks_functions = numpy.full((reflections_number, 2), None)
for reflection_index in range(reflections_number):
if isinstance(phase, GSASIIPhase):
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=line_profile.get_additional_parameters_of_phase(phase_index))
else:
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters)
separated_peaks_functions[reflection_index, 0] = s_analytical
separated_peaks_functions[reflection_index, 1] = intensity_analytical
# INTERPOLATION ONTO ORIGINAL S VALUES -------------------------------------------------------------------------
intensity_phase = __merge_functions(separated_peaks_functions, s)
# ADD SAXS
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.DELTA and size_parameters.add_saxs:
                if not phase.use_structure: raise NotImplementedError("SAXS is available only when the structural model is active")
intensity_phase += saxs(s,
size_parameters.mu.value,
phase.a.value,
phase.formula,
phase.symmetry,
size_parameters.normalize_to)
# ADD DEBYE-WALLER FACTOR --------------------------------------------------------------------------------------
thermal_parameters = fit_global_parameters.get_thermal_parameters_item(ThermalParameters.__name__, diffraction_pattern_index)
if not thermal_parameters is None:
debye_waller_factor = thermal_parameters.get_debye_waller_factor(phase_index)
if not debye_waller_factor is None:
intensity_phase *= debye_waller(s, debye_waller_factor.value*0.01) # from A-2 to nm-2
separated_phases_intensities[phase_index, 0] = s
separated_phases_intensities[phase_index, 1] = intensity_phase
intensity = __merge_functions(separated_phases_intensities, s)
if not incident_radiation.is_single_wavelength:
principal_wavelength = incident_radiation.wavelength
intensity_scaled = intensity*incident_radiation.get_principal_wavelenght_weight()
separated_secondary_intensities = numpy.full((len(incident_radiation.secondary_wavelengths), 2), None)
secondary_index = 0
for secondary_wavelength, secondary_wavelength_weigth in zip(incident_radiation.secondary_wavelengths,
incident_radiation.secondary_wavelengths_weights):
s_secondary = s * secondary_wavelength.value/principal_wavelength.value
separated_secondary_intensities[secondary_index, 0] = s_secondary
separated_secondary_intensities[secondary_index, 1] = intensity*secondary_wavelength_weigth.value
secondary_index += 1
intensity = intensity_scaled + __merge_functions(separated_secondary_intensities, s)
return intensity
#################################################
# FOURIER FUNCTIONS
#################################################
class FourierTranformFactory:
@classmethod
def get_fourier_transform(cls, type=FFTTypes.REAL_ONLY):
if type == FFTTypes.REAL_ONLY:
return FourierTransformRealOnly
elif type == FFTTypes.FULL:
return FourierTransformFull
else:
raise ValueError("Type not recognized")
class FourierTransform:
@classmethod
def fft(cls, f, n_steps, dL):
raise NotImplementedError()
@classmethod
def get_empty_fft(cls, n_steps, dL):
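        # With nothing to transform, build the frequency grid and return a
        # unit impulse at its centre: an unbroadened, delta-like profile.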
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
I = numpy.zeros(len(s))
I[int(len(s)/2)] = 1.0
return s, I
class FourierTransformRealOnly(FourierTransform):
@classmethod
def _real_absolute_fourier(cls, y):
return numpy.fft.fftshift(numpy.abs(numpy.real(numpy.fft.fft(y))))
@classmethod
def _fft_normalized(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
integral = numpy.trapz(y_fft, s)
return s, y_fft / integral
@classmethod
def fft(cls, f, n_steps, dL):
return cls._fft_normalized(cls._real_absolute_fourier(f), n_steps, dL)
from scipy.integrate import simps
class FourierTransformFull(FourierTransform):
@classmethod
def _full_fourier(cls, y):
return numpy.fft.fftshift(numpy.fft.fft(y))
@classmethod
def _fft_shifted(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
y_fft -= y_fft[0]
return s, y_fft
@classmethod
def _fft_real(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.real(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _fft_imag(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.imag(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _normalize(cls, s, i):
return s, i/simps(i, s)
@classmethod
def fft(cls, f, n_steps, dL):
sr, fft_real = cls._fft_real(numpy.real(f), n_steps, dL)
si, fft_imag = cls._fft_imag(numpy.imag(f), n_steps, dL)
return cls._normalize(sr, fft_real - fft_imag)
#################################################
# CALCULATION OF A SINGLE PEAK
#################################################
def create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=None):
fft_type = fit_global_parameters.fit_initialization.fft_parameters.fft_type
fit_space_parameters = fit_global_parameters.space_parameters()
reflection = line_profile.get_reflection(phase_index, reflection_index)
wavelength = incident_radiation.wavelength.value
lattice_parameter = phase.a.value
fourier_amplitudes = None
# INSTRUMENTAL PROFILE ---------------------------------------------------------------------------------------------
instrumental_profile_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(Caglioti.__name__, diffraction_pattern_index)
if not instrumental_profile_parameters is None:
if fourier_amplitudes is None:
fourier_amplitudes = instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
else:
fourier_amplitudes *= instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
# SIZE -------------------------------------------------------------------------------------------------------------
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.LOGNORMAL:
if size_parameters.shape == Shape.SPHERE:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.shape == Shape.WULFF:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
else:
fourier_amplitudes *=size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
elif size_parameters.distribution == Distribution.GAMMA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.distribution == Distribution.DELTA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
# STRAIN -----------------------------------------------------------------------------------------------------------
strain_parameters = fit_global_parameters.get_strain_parameters(phase_index)
if not strain_parameters is None and strain_parameters.active:
if isinstance(strain_parameters, InvariantPAH): # INVARIANT PAH
if fourier_amplitudes is None:
fourier_amplitudes = strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
else:
fourier_amplitudes *= strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
elif isinstance(strain_parameters, KrivoglazWilkensModel): # KRIVOGLAZ-WILKENS
if fourier_amplitudes is None:
fourier_amplitudes = strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
else:
fourier_amplitudes *= strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
elif isinstance(strain_parameters, WarrenModel): # WARREN
fourier_amplitudes_re, fourier_amplitudes_im = strain_warren_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.average_cell_parameter.value)
if fft_type == FFTTypes.FULL:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re + 1j*fourier_amplitudes_im
else:
fourier_amplitudes = (fourier_amplitudes*fourier_amplitudes_re) + 1j*(fourier_amplitudes*fourier_amplitudes_im)
elif fft_type == FFTTypes.REAL_ONLY:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re
else:
fourier_amplitudes *= fourier_amplitudes_re
# FFT -----------------------------------------------------------------------------------------------------------
if not fourier_amplitudes is None:
s, I = FourierTranformFactory.get_fourier_transform(fft_type).fft(fourier_amplitudes,
n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
else:
s, I = FourierTransform.get_empty_fft(n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
s_hkl = Utilities.s_hkl(lattice_parameter, reflection.h, reflection.k, reflection.l)
s += s_hkl
# INTENSITY MODULATION: STRUCTURAL MODEL YES/NO --------------------------------------------------------------------
if phase.use_structure:
if isinstance(phase, GSASIIPhase):
I *= phase.intensity_scale_factor.value * gsasii_intensity_factor(reflection.h,
reflection.k,
reflection.l,
gsas_reflections_list)
else:
I *= phase.intensity_scale_factor.value * \
multiplicity_cubic(reflection.h, reflection.k, reflection.l) * \
squared_modulus_structure_factor(s_hkl,
phase.formula,
reflection.h,
reflection.k,
reflection.l,
phase.symmetry)
else:
I *= reflection.intensity.value
    #TODO: add TDS handling with ad hoc data structures + a dedicated widget
# PEAK SHIFTS -----------------------------------------------------------------------------------------------------
if not fit_global_parameters.shift_parameters is None:
theta = Utilities.theta(s, wavelength)
for key in fit_global_parameters.shift_parameters.keys():
shift_parameters = fit_global_parameters.get_shift_parameters_item(key, diffraction_pattern_index)
if not shift_parameters is None:
if key == Lab6TanCorrection.__name__:
s += lab6_tan_correction(theta, wavelength,
shift_parameters.ax.value,
shift_parameters.bx.value,
shift_parameters.cx.value,
shift_parameters.dx.value,
shift_parameters.ex.value)
elif key == ZeroError.__name__:
s += Utilities.s(shift_parameters.shift.value/2, wavelength)
elif key == SpecimenDisplacement.__name__:
s += specimen_displacement(theta, wavelength, shift_parameters.goniometer_radius, shift_parameters.displacement.value*1e-6) # to m
# LORENTZ FACTOR --------------------------------------------------------------------------------------
if not fit_global_parameters.instrumental_profile_parameters is None:
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_lorentz_factor:
if polarization_parameters.lorentz_formula == LorentzFormula.Shkl_Shkl:
I *= lorentz_factor_simplified_normalized(s_hkl, wavelength)
elif polarization_parameters.lorentz_formula == LorentzFormula.S_Shkl:
I *= lorentz_factor_normalized(s, s_hkl, wavelength)
return s, I
######################################################################
# WPPM FUNCTIONS
######################################################################
import numpy
from scipy.special import erfc
import os
######################################################################
# THERMAL AND POLARIZATION
######################################################################
def debye_waller(s, B):
return numpy.exp(-0.5*B*(s**2)) # it's the exp(-2M) = exp(-Bs^2/2)
def lorentz_factor(s, s_hkl):
return 1/(s*s_hkl)
def lorentz_factor_normalized(s, s_hkl, wavelength):
return lorentz_factor(s, s_hkl)/numpy.sqrt(1 - (s*wavelength/2)**2)
def lorentz_factor_simplified(s_hkl):
return 1/(s_hkl**2)
def lorentz_factor_simplified_normalized(s_hkl, wavelength):
return lorentz_factor_simplified(s_hkl)/numpy.sqrt(1 - (s_hkl*wavelength/2)**2)
def polarization_factor(twotheta, twotheta_mono, degree_of_polarization, beampath):
Q = degree_of_polarization
if twotheta_mono is None or twotheta_mono == 0.0:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta)**2))/2
else:
if beampath == Beampath.PRIMARY:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta_mono)**2)*(numpy.cos(twotheta)**2))/(1 + (numpy.cos(twotheta_mono)**2))
elif beampath == Beampath.SECONDARY:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta_mono)**2)*(numpy.cos(twotheta)**2))/2
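# Illustrative example (values assumed, not from the original source):
# unpolarized beam with a graphite monochromator on the secondary beam path,
# at 2theta = 40 deg:
#   pf = polarization_factor(numpy.radians(40.0), numpy.radians(26.6), 0.0, Beampath.SECONDARY)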
######################################################################
# SIZE
######################################################################
from scipy.special import gamma as G, gammaincc as GU
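# Size Fourier coefficient of a sphere of diameter D (monodisperse, delta
# distribution); the cubic form below is meaningful for 0 <= L <= D.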
def size_function_delta(L, D):
LfracD = L/D
return 1 - 1.5*LfracD + 0.5*LfracD**3
def size_function_lognormal(L, sigma, mu):
modL = numpy.abs(L)
lnModL = numpy.log(modL)
sqrt2 = numpy.sqrt(2)
size = 0.5*erfc((lnModL - mu -3*sigma**2)/(sigma*sqrt2)) + \
-0.75*modL*erfc((lnModL - mu -2*sigma**2)/(sigma*sqrt2))*numpy.exp(-mu - 2.5*sigma**2) + \
0.25*(L**3)*erfc((lnModL - mu)/(sigma*sqrt2)) *numpy.exp(-3*mu - 4.5*sigma**2)
return size
def size_function_gamma(L, g, mu):
Lgm = L*g/mu
size = ((0.5*(Lgm**3)*GU(g, Lgm)) - \
(1.5*Lgm*GU(g+2, Lgm)) + \
GU(g+3, Lgm)) / G(g+3)
return size
def lognormal_distribution(mu, sigma, x):
return numpy.exp(-0.5*((numpy.log(x)-mu)/(sigma))**2)/(x*sigma*numpy.sqrt(2*numpy.pi))
def delta_distribution(mu, x):
distribution = numpy.zeros(len(x))
distribution[numpy.where(x==mu)] = 1.0
return distribution
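# Note: the delta distribution above relies on exact float equality (x == mu),
# so it only registers on grids that contain mu exactly.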
def gamma_distribution(mu, g, x):
gxm = g*x/mu
return (g/(mu*G(g)))*(gxm**(g-1))*numpy.exp(-gxm)
def york_distribution(mu, g, x):
gxm = g*x/mu
return (g/(mu*G(g)))*(gxm**g)*numpy.exp(-gxm)
def lognormal_average(mu, sigma):
return lognormal_moment(1, mu, sigma)
def lognormal_average_surface_weigthed(mu, sigma):
return numpy.exp(mu+1.25*sigma**2)
def lognormal_average_volume_weigthed(mu, sigma):
return numpy.exp(mu+1.75*sigma**2)
def lognormal_standard_deviation(mu, sigma):
return numpy.sqrt(numpy.exp(2*mu + sigma**2)*(numpy.exp(sigma**2)-1))
def lognormal_moment(n, mu, sigma):
return numpy.exp(n*mu + 0.5*(n**2)*(sigma**2))
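# Quick sanity check (illustrative): the number-average of a lognormal is its
# first moment, e.g. lognormal_average(2.0, 0.5) == lognormal_moment(1, 2.0, 0.5)
# == numpy.exp(2.125).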
######################################################################
# SIZE - WULFF SOLIDS
######################################################################
THRESHOLD = 1e-3
class WulffSolidDataRow:
def __init__(self,
h, k, l,
level, limit_dist,
aa, bb, cc, dd, chi_square_1,
a0, b0, c0, d0, xj, a1, b1, c1, d1, xl, chi_square_2):
self.h = h
self.k = k
self.l = l
self.level = level
self.limit_dist = limit_dist
self.aa = aa
self.bb = bb
self.cc = cc
self.dd = dd
self.chi_square_1 = chi_square_1
self.a0 = a0
self.b0 = b0
self.c0 = c0
self.d0 = d0
self.xj = xj
self.a1 = a1
self.b1 = b1
self.c1 = c1
self.d1 = d1
self.xl = xl
self.chi_square_2 = chi_square_2
@classmethod
def parse_row(cls, row):
return WulffSolidDataRow(int(row[0]),
int(row[1]),
int(row[2]),
int(row[3]),
row[4],
row[5],
row[6],
row[7],
row[8],
row[9],
row[10],
row[11],
row[12],
row[13],
row[14],
row[15],
row[16],
row[17],
row[18],
row[19],
row[20])
@classmethod
def get_key(cls, h, k, l, level):
return str(int(h)) + str(int(k)) + str(int(l)) + "_" + str(int(level))
def key(self):
return WulffSolidDataRow.get_key(self.h, self.k, self.l, self.level)
def __str__(self):
return str(self.h ) + " " + \
str(self.k ) + " " + \
str(self.l ) + " " + \
str(self.level ) + " " + \
str(self.limit_dist ) + " " + \
str(self.aa ) + " " + \
str(self.bb ) + " " + \
str(self.cc ) + " " + \
str(self.dd ) + " " + \
str(self.chi_square_1) + " " + \
str(self.a0 ) + " " + \
str(self.b0 ) + " " + \
str(self.c0 ) + " " + \
str(self.d0 ) + " " + \
str(self.xj ) + " " + \
str(self.a1 ) + " " + \
str(self.b1 ) + " " + \
str(self.c1 ) + " " + \
str(self.d1 ) + " " + \
str(self.xl ) + " " + \
str(self.chi_square_2)
def __load_wulff_solids_data(file_name):
wulff_data_path = os.path.join(os.path.dirname(__file__), "data")
wulff_data_path = os.path.join(wulff_data_path, "wulff_solids")
rows = numpy.loadtxt(os.path.join(wulff_data_path, file_name), skiprows=2)
wulff_solids_data = {}
for row in rows:
wulff_solids_data_row = WulffSolidDataRow.parse_row(row)
wulff_solids_data[wulff_solids_data_row.key()] = wulff_solids_data_row
return wulff_solids_data
if not 'wulff_solids_data_hexagonal' in globals():
wulff_solids_data_hexagonal = __load_wulff_solids_data("Cube_TruncatedCubeHexagonalFace_L_FIT.data")
wulff_solids_data_triangular = __load_wulff_solids_data("Cube_TruncatedCubeTriangularFace_L_FIT.data")
def __get_wulff_solid_Hj_coefficients(h, k, l, truncation, face): # N.B. L, truncation >= 0!
# x - x1 / x2 - x1 = y - y1 / y2 - y1
# x1 = 0, x2 = 1
# -> y = y1 + x (y2 - y1)
def __point_in_between(y1, y2, x):
return y1 + x*(y2 - y1)
divisor = numpy.gcd.reduce([h, k, l])
truncation_on_file = 100.0*truncation
if truncation_on_file.is_integer():
if face == WulffCubeFace.TRIANGULAR:
wulff_solid_data_row = wulff_solids_data_triangular[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, truncation_on_file)]
else:
wulff_solid_data_row = wulff_solids_data_hexagonal[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, truncation_on_file)]
return wulff_solid_data_row
else:
x = truncation % 1 # decimal part
if face == WulffCubeFace.TRIANGULAR:
coefficients_bottom = wulff_solids_data_triangular[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, int(truncation_on_file))]
coefficients_top = wulff_solids_data_triangular[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, min(100, 1 + int(truncation_on_file)))]
else:
coefficients_bottom = wulff_solids_data_hexagonal[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, int(truncation_on_file))]
coefficients_top = wulff_solids_data_hexagonal[WulffSolidDataRow.get_key(h/divisor, k/divisor, l/divisor, min(100, 1 + int(truncation_on_file)))]
wulff_solid_data_row = WulffSolidDataRow(h,
k,
l,
truncation_on_file,
__point_in_between(coefficients_top.limit_dist , coefficients_bottom.limit_dist , x),
__point_in_between(coefficients_top.aa , coefficients_bottom.aa , x),
__point_in_between(coefficients_top.bb , coefficients_bottom.bb , x),
__point_in_between(coefficients_top.cc , coefficients_bottom.cc , x),
__point_in_between(coefficients_top.dd , coefficients_bottom.dd , x),
__point_in_between(coefficients_top.chi_square_1, coefficients_bottom.chi_square_1, x),
__point_in_between(coefficients_top.a0 , coefficients_bottom.a0 , x),
__point_in_between(coefficients_top.b0 , coefficients_bottom.b0 , x),
__point_in_between(coefficients_top.c0 , coefficients_bottom.c0 , x),
__point_in_between(coefficients_top.d0 , coefficients_bottom.d0 , x),
__point_in_between(coefficients_top.xj , coefficients_bottom.xj , x),
__point_in_between(coefficients_top.a1 , coefficients_bottom.a1 , x),
__point_in_between(coefficients_top.b1 , coefficients_bottom.b1 , x),
__point_in_between(coefficients_top.c1 , coefficients_bottom.c1 , x),
__point_in_between(coefficients_top.d1 , coefficients_bottom.d1 , x),
__point_in_between(coefficients_top.xl , coefficients_bottom.xl , x),
__point_in_between(coefficients_top.chi_square_2, coefficients_bottom.chi_square_2, x))
return wulff_solid_data_row
def size_function_wulff_solids_lognormal(L, h, k, l, sigma, mu, truncation, face):
def __lognormal_momentum(mu, sigma2, n):
return numpy.exp((n*mu) + (0.5*sigma2*(n**2)))
def __FFourierLognormal(poly_coefficients, L, Kc, mu, sigma2, ssqrt2, M3, is_array):
if is_array:
A = numpy.zeros(len(L))
else:
A = 0.0
for n in range(len(poly_coefficients)):
A += poly_coefficients[n]*\
erfc((numpy.log(L*Kc)-mu-((3.0-n)*sigma2))/ssqrt2)*\
(L**n)*0.5*__lognormal_momentum(mu, sigma2, 3-n)/M3
if is_array:
A[numpy.where(A <= 1e-20)] = 0.0
else:
if A <= 1e-20: A = 0.0
return A
is_array = isinstance(L, list) or isinstance(L, numpy.ndarray)
if not is_array and L==0: return 1.0
sigma2 = sigma*sigma
ssqrt2 = sigma*numpy.sqrt(2.0)
M3 = __lognormal_momentum(mu, sigma2, 3)
coefficients = __get_wulff_solid_Hj_coefficients(h, k, l, truncation, face)
Hn_do1 = numpy.array([coefficients.a0, coefficients.b0, coefficients.c0, coefficients.d0])
Hn_do2 = numpy.array([coefficients.a1, coefficients.b1, coefficients.c1, coefficients.d1])
Hn_LD = coefficients.limit_dist * 0.01
Hn_Kc = 1/Hn_LD
Hn_xj = coefficients.xj
if numpy.abs(Hn_xj-1.0)<THRESHOLD:
fourier_amplitude = __FFourierLognormal(Hn_do1, L*Hn_Kc, 1.0, mu, sigma2, ssqrt2, M3, is_array)
else:
fourier_amplitude = __FFourierLognormal(Hn_do2, L*Hn_Kc, 1. , mu, sigma2, ssqrt2, M3, is_array) # integr(f2) on LK
fourier_amplitude += __FFourierLognormal(Hn_do1, L*Hn_Kc, 1./Hn_xj, mu, sigma2, ssqrt2, M3, is_array) # (integr(f1)) on LKxj
fourier_amplitude -= __FFourierLognormal(Hn_do2, L*Hn_Kc, 1./Hn_xj, mu, sigma2, ssqrt2, M3, is_array) # (integr(f2)) on LKxj
if is_array:
fourier_amplitude[numpy.where(L == 0.0)] = 1.0
fourier_amplitude[numpy.where(fourier_amplitude < 0.0)] = 0.0
fourier_amplitude[numpy.where(fourier_amplitude > 1.0)] = 1.0
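        # suppress non-physical ripples: zero any coefficient larger than its
        # predecessor, since the amplitude must decay monotonically with L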
fourier_amplitude[2:][numpy.where(numpy.greater(fourier_amplitude[2:], fourier_amplitude[1:-1]))] = 0
else:
if fourier_amplitude > 1.0 : return 1.0
if fourier_amplitude < 0.0 : return 0.0
        # note: the monotonicity check applied in the array branch is skipped for scalars
return fourier_amplitude
######################################################################
# STRAIN
######################################################################
# INVARIANT PAH --------------------------------
def strain_invariant_function_pah(L, h, k, l, lattice_parameter, a, b, C_hkl):
s_hkl = Utilities.s_hkl(lattice_parameter, h, k, l)
return numpy.exp(-((2*numpy.pi**2)/((s_hkl**2)*(lattice_parameter**4))) * C_hkl * (a*L + b*(L**2)))
def displacement_invariant_pah(L, h, k, l, a, b, C_hkl):
return numpy.sqrt((C_hkl*(a*L + b*(L**2)))/((h**2+k**2+l**2)**2))
# Krivoglaz-Wilkens --------------------------------
from scipy import integrate
from numpy import pi, log, sqrt, arcsin, sin, cos # TO SHORTEN FORMULAS
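# Clausen function Cl2(x) = -integral_0^x ln(2 sin(t/2)) dt, evaluated
# numerically below; it enters the full (non-simplified) Wilkens expression.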
def clausen_integral_inner_function(t):
return log(2*sin(t/2))
def clausen_integral(x=0.0):
_v_integrate_quad = numpy.vectorize(integrate.quad)
return -1*(_v_integrate_quad(lambda t: clausen_integral_inner_function(t), 0.0, x)[0])
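# Wilkens f*(eta) entering the Krivoglaz-Wilkens strain Fourier coefficient:
# an asymptotic branch for eta >= 1 and, for eta < 1, either a short series
# expansion (default) or the full expression involving the Clausen integral.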
def f_star(eta, use_simplified_calculation=True):
is_array = isinstance(eta, list) or isinstance(eta, numpy.ndarray)
if not is_array:
if eta >= 1:
return (256/(45*pi*eta)) - ((11/24) + (log(2) - log(eta))/4)/(eta**2)
else:
if use_simplified_calculation:
return (7/4) - log(2) - log(eta) + ((eta**2)/6) - (32*(eta**3))/(225*pi)
else:
return (256/(45*pi*eta)) \
+ ((eta**2)/6) - log(2) - log(eta) \
+ -eta*sqrt(1-(eta**2))*(769 + 4*(eta**2)*(20.5 + (eta**2)))/(180*pi*(eta**2)) \
+ -((45 - 180*eta**2)*clausen_integral(2*arcsin(eta)) \
+ (15*arcsin(eta)*(11 + 4*(eta**2)*(10.5 + (eta**2)) + (6 - 24*(eta**2))*(log(2) + log(eta)))))/(180*pi*(eta**2))
else:
result = numpy.zeros(len(eta))
cursor_1 = numpy.where(eta >= 1)
cursor_2 = numpy.where(eta < 1)
eta1 = eta[cursor_1]
eta2 = eta[cursor_2]
result[cursor_1] = (256/(45*pi*eta1)) - ((11/24) + (log(2) - log(eta1))/4)/(eta1**2)
if use_simplified_calculation:
result[cursor_2] = (7/4) - log(2) - log(eta2) + ((eta2**2)/6) - (32*(eta2**3))/(225*pi)
else:
result[cursor_2] = (256/(45*pi*eta2)) \
+ ((eta2**2)/6) - log(2) - log(eta2) \
+ -eta2*sqrt(1-(eta2**2))*(769 + 4*(eta2**2)*(20.5 + (eta2**2)))/(180*pi*(eta2**2)) \
+ -((45 - 180*eta2**2)*clausen_integral(2*arcsin(eta2)) \
                               + (15*arcsin(eta2)*(11 + 4*(eta2**2)*(20.5 + (eta2**2)) + (6 - 24*(eta2**2))*(log(2) + log(eta2)))))/(180*pi*(eta2**2))
        return result
import numpy as np
import itertools
import multiprocessing
MAX_ITERATES = 5000
class Sampling(object):
def __init__(self, seed=1):
self.reset(seed)
def reset(self, seed=1):
self.rng = np.random.RandomState(seed)
self.seed = seed
class NegativeSampling(Sampling):
def __init__(self, seed=1):
super().__init__(seed=seed)
def sample(self, data, n_possibilities):
"""
Implements vanilla negative sampling.
        Either the head entity or the tail entity is replaced with an entity
        drawn uniformly at random from the full set of entities.
"""
        # select whether to replace the head (column 0) or the tail (column 2)
data = data.copy()
entity_to_replace = self.rng.choice([0, 2], replace=True, size=data.shape[0])
entity_to_replace_with = self.rng.randint(n_possibilities, size=data.shape[0])
data[np.arange(0, data.shape[0]), entity_to_replace] = entity_to_replace_with
return data
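# Usage sketch (illustrative; names and values assumed): triples are integer
# (head, relation, tail) rows, and sample() returns a corrupted copy.
#   sampler = NegativeSampling(seed=42)
#   triples = np.array([[0, 0, 1], [2, 1, 3]], dtype=np.int32)
#   corrupted = sampler.sample(triples, n_possibilities=4)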
class SmartNegativeSampling(Sampling):
"""
Implements smart negative sampling where the head or tail entity is replaced
with an entity that is allowed at that position based on the relationship.
To use this in Dataset:
```
sns = SmartNegativeSampling(kb)
dset = Dataset(kb, sampler=sns.smart_triple_corruption)
```
"""
def __init__(self, kb, workers=4, seed=1):
super().__init__(seed=seed)
self.workers = workers
self.kb = kb
assert kb._converted_triples
data = np.array(kb.triples, dtype=np.int32)
# this stores the unique relations in our dataset
unique_relations = np.unique(data[:, 1]).tolist()
# we now find all possible heads and tails that satisfy the relation
        # this will be used in the classification task as the triples
# that should be scored negatively
possible_heads = {}
possible_tails = {}
for r in unique_relations:
possible_heads[r] = data[np.where(data[:, 1] == r), 0][0].tolist()
            possible_tails[r] = data[np.where(data[:, 1] == r), 2][0].tolist()
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
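# Usage sketch (illustrative): once the dictionary below is populated,
# symmetry-equivalent reflections and their phase factors are obtained as
#   hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   # hkls -> [[1, 2, 3], [-1, -2, -3]]; phases -> [1.+0.j, 1.+0.j]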
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
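# Sketch (illustration only, standard-library fractions): in centred settings
# such as 'I 21 21 21' above, each base operation is stored a second time with
# the centring vector (1/2,1/2,1/2) added, without reducing the sum mod 1 --
# e.g. (0,0,1/2) + (1/2,1/2,1/2) appears as trans_num=[1,1,1],
# trans_den=[2,2,1], i.e. (1/2,1/2,1). A reduced form can be recovered:
from fractions import Fraction
def _reduced_translation(trans_num, trans_den):
    # Componentwise exact fraction, wrapped into [0, 1); also handles the
    # negative numerators used by the centrosymmetric groups further down.
    return [Fraction(int(n), int(d)) % 1 for n, d in zip(trans_num, trans_den)]
# _reduced_translation([1, 1, 1], [2, 2, 1]) -> [1/2, 1/2, 0]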
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
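# Every group is registered twice, under its IT number and under its
# Hermann-Mauguin symbol, so both keys resolve to the same object; e.g.
# space_groups[42] is space_groups['F m m 2']. The F-centred list above holds
# one copy of the four base operations per lattice point (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0) -- a cheap sanity check (illustration only):
assert len(transformations) == 16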
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
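# Note the quarter-cell translations trans_num=[1,1,1]/trans_den=[4,4,4]
# above: these are the d-glide components characteristic of 'F d d 2'.
# Added (unreduced) to the three F-centring vectors they give the 3/4
# components seen in the same list, e.g. (1/4,1/4,1/4) + (0,1/2,1/2) =
# (1/4,3/4,3/4), stored as trans_num=[1,3,3]/trans_den=[4,4,4].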
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
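# Sketch (illustration only): in the centrosymmetric groups, beginning with
# 'P n n n :2' above, the second four operations are the first four composed
# with the inversion -x,-y,-z, which is why negative numerators such as
# trans_num=[0,-1,-1] occur; they are congruent to positive translations
# mod 1. Composing two stored operations with exact arithmetic (the helper
# is ours; Fraction as imported above):
from fractions import Fraction
def _compose(op_a, op_b):
    # Returns op_a applied after op_b: rotation rot_a.rot_b, translation
    # rot_a.t_b + t_a, each component reduced into [0, 1) as a Fraction.
    rot_a, num_a, den_a = op_a
    rot_b, num_b, den_b = op_b
    t_a = [Fraction(int(n), int(d)) for n, d in zip(num_a, den_a)]
    t_b = [Fraction(int(n), int(d)) for n, d in zip(num_b, den_b)]
    rot = N.dot(rot_a, rot_b)
    trans = [(sum(int(rot_a[i][j]) * t_b[j] for j in range(3)) + t_a[i]) % 1
             for i in range(3)]
    return rot, trans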
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
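# Illustration: applying every operation of a group to a general fractional
# position enumerates its crystallographic orbit; for 'P n m a' above the
# eight operations yield the full multiplicity-8 general position (uses the
# _apply_symmetry_op sketch defined near 'C 2 2 21').
_pnma_orbit = [tuple(_apply_symmetry_op(t, (0.1, 0.2, 0.3)))
               for t in transformations]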
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
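# Space groups 16-74 (orthorhombic system) end here; groups 75 onwards belong
# to the tetragonal system, which is why fourfold rotation matrices such as
# N.array([0,-1,0,1,0,0,0,0,1]) start to appear in the rot entries below.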
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
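# P 41 (No. 76) and P 43 (No. 78) are an enantiomorphic pair of screw-axis
# groups: the fourfold screw carries a translation of c/4 in one and 3c/4 in
# the other, visible above as trans_num/trans_den values of 1/4 versus 3/4.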
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
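# Body-centred (I) groups such as 'I 4' list every operation twice: once with
# zero translation and once shifted by the centring vector (1/2, 1/2, 1/2),
# i.e. trans_num = [1,1,1] with trans_den = [2,2,2].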
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
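# Illustrative sketch (an addition, not part of the generated table):
# applying one stored operation to a fractional coordinate.  The helper
# name `apply_symop` is hypothetical; it relies only on the triple layout
# described above and assumes the usual crystallographic convention
# x' = R.x + t with R acting on column vectors.
import numpy as _np

def apply_symop(op, xyz):
    """Apply a (rot, trans_num, trans_den) triple to fractional
    coordinates xyz and reduce the image into the unit cell [0, 1)."""
    rot, trans_num, trans_den = op
    # Translation as a float 3-vector: elementwise num / den.
    t = _np.asarray(trans_num, dtype=float) / _np.asarray(trans_den, dtype=float)
    return (_np.dot(_np.asarray(rot), _np.asarray(xyz, dtype=float)) + t) % 1.0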
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
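# Groups 91-96 below are the screw-axis variants of P 4 2 2 / P 4 21 2:
# the fourfold operation acquires a z translation of 1/4 (4_1), 1/2 (4_2)
# or 3/4 (4_3), visible in the trans_num/trans_den pairs.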
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
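# Groups 99-110 (the 4mm point-group family) follow.  In each list the
# first four operations are the rotations (or screw rotations) about c
# and the last four are the mirror or glide planes, whose glide
# components show up as nonzero translations.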
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
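# For the body-centred (I) groups, note the pattern above and below: the
# second half of each transformation list repeats the first eight
# operations with the centring translation (1/2, 1/2, 1/2) added in.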
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
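# The groups that follow (P -4 2 m onwards) belong to the -42m / -4m2
# family: the second operation in each list, [0,1,0,-1,0,0,0,0,-1],
# i.e. (x, y, z) -> (y, -x, -z), is the fourfold rotoinversion -4
# about the c axis.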
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
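# Note: every operation of P 4/m m m above carries the identity
# translation (trans_num = [0,0,0], trans_den = [1,1,1]); it is a
# symmorphic group, whereas the :2 origin settings and the screw/glide
# variants that follow mix fractional shifts into their operations.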
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
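# I-centred groups such as I 4/m m m list each point-group operation
# twice: once with its basic translation and once with the body-centring
# vector (1/2, 1/2, 1/2) added, which is why the second half of the list
# above repeats the same rotation matrices.  Each group is registered
# under both its IT number and its Hermann-Mauguin symbol, so
# space_groups[139] and space_groups['I 4/m m m'] return the same
# SpaceGroup object.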
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
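# The ':2' suffix on symbols such as 'I 41/a m d :2' marks origin choice 2
# of the International Tables (origin at a centre of inversion).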
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
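# Space group 142 is the last tetragonal entry; groups 143-167 below are
# trigonal.  Rhombohedral groups in their hexagonal-axes setting carry the
# ':H' suffix (e.g. 'R 3 :H').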
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
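# P 31 and P 32 (groups 144 and 145) are the enantiomorphic pair of
# threefold screw axes: the same rotations combined with c-axis
# translations of 1/3 and 2/3 in opposite order.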
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
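# In the hexagonal setting, R-centred groups repeat their point operations
# with the centring translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3),
# which is why each R group below lists three times as many operations.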
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
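# P 3 1 2 and P 3 2 1 (groups 149 and 150) share the same threefold axis
# but orient their twofold axes differently with respect to the hexagonal
# lattice vectors; the same '1 2' versus '2 1' distinction recurs in the
# screw-axis and mirror variants below.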
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
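# A minimal usage sketch (an illustration added here, not part of the
# generated table): apply one symmetry operation to a fractional
# coordinate.  It assumes SpaceGroup keeps its operation list in a
# 'transformations' attribute, as built above.
if __name__ == '__main__':
    sg_demo = space_groups['P 31 2 1']
    rot_demo, num_demo, den_demo = sg_demo.transformations[1]
    point = N.array([0.1, 0.2, 0.3])
    # fractional translation is trans_num/trans_den, applied after rotation
    image = N.dot(rot_demo, point) + 1.0 * num_demo / den_demo
    print(image)   # -> approximately [-0.2, -0.1, 0.63333]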
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
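# Space group No. 168 (P 6): six-fold rotation about c; 6 operations.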
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
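# Space group No. 169 (P 61): 6_1 screw axis (c/6 translation per 60-degree step); 6 operations.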
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
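# Space group No. 170 (P 65): 6_5 screw axis, enantiomorph of P 61; 6 operations.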
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
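# Space group No. 171 (P 62): 6_2 screw axis; 6 operations.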
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
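# Space group No. 172 (P 64): 6_4 screw axis, enantiomorph of P 62; 6 operations.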
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
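# Space group No. 173 (P 63): 6_3 screw axis; 6 operations.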
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
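# Space group No. 174 (P -6): -6 roto-inversion axis (equivalent to 3/m); 6 operations.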
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
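# Space group No. 175 (P 6/m): six-fold axis with a perpendicular mirror plane; 12 operations.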
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
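# Space group No. 176 (P 63/m): 6_3 screw axis with a perpendicular mirror plane; 12 operations.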
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
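# Space group No. 177 (P 6 2 2): six-fold axis with perpendicular two-fold axes; 12 operations.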
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
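# Space group No. 178 (P 61 2 2): 6_1 screw axis with perpendicular two-fold axes; 12 operations.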
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
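# Space group No. 179 (P 65 2 2): enantiomorph of P 61 2 2; 12 operations.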
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
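# Space group No. 180 (P 62 2 2): 6_2 screw axis with perpendicular two-fold axes; 12 operations.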
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
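# Space group No. 181 (P 64 2 2): enantiomorph of P 62 2 2; 12 operations.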
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
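# Space group No. 182 (P 63 2 2): 6_3 screw axis with perpendicular two-fold axes; 12 operations.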
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
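# Space group No. 183 (P 6 m m): six-fold axis with vertical mirror planes; 12 operations.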
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
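# Space group No. 184 (P 6 c c): six-fold axis with vertical c-glide planes; 12 operations.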
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
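# Space group No. 185 (P 63 c m): 6_3 screw axis with c-glide and mirror planes; 12 operations.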
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
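# Space group No. 186 (P 63 m c): 6_3 screw axis with mirror and c-glide
# planes (the wurtzite structure type); 12 operations.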
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
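# Space group No. 187 (P -6 m 2): -6 axis with mirror planes and two-fold axes; 12 operations.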
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
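# Space group No. 188 (P -6 c 2): -6 axis with c-glide planes and two-fold axes; 12 operations.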
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
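# Space group No. 189 (P -6 2 m): -6 axis with two-fold axes and mirror planes; 12 operations.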
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
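# Space group No. 190 (P -6 2 c): -6 axis with two-fold axes and c-glide planes; 12 operations.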
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
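# Space group No. 191 (P 6/m m m): the full hexagonal holohedry; 24 operations.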
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
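# Space group No. 192 (P 6/m c c): 24 operations.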
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
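# Space group No. 193 (P 63/m c m): 24 operations.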
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
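# The F-centred group repeats the twelve point operations of P 2 3 four
# times, once per centring translation (0,0,0), (0,1/2,1/2), (1/2,0,1/2)
# and (1/2,1/2,0), as the trans_num/trans_den blocks above show.  A
# sketch of expanding a primitive operation list over centring shifts
# (`_expand_centering` is a hypothetical helper; it returns translations
# as plain float vectors rather than the (num, den) pairs used by the
# generated table):
def _expand_centering(point_ops, centring_shifts):
    """Replicate (rot, num, den) operations over lattice-centring shifts."""
    expanded = []
    for rot, num, den in point_ops:
        base = num * 1.0 / den          # exact fraction, as floats
        for shift in centring_shifts:
            expanded.append((rot, base + shift))
    return expanded
# e.g. the shifts for an F lattice:
#   f_shifts = [N.array(s) for s in ((0., 0., 0.), (0., .5, .5),
#                                    (.5, 0., .5), (.5, .5, 0.))]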
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
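# The body-centred (I) groups list each operation twice, the second copy
# with the centring shift (1/2,1/2,1/2) added.  The generated
# translations are not reduced modulo the lattice, so components such as
# (1/2, 1, 1/2) appear above; they are equivalent to (1/2, 0, 1/2) up to
# a lattice translation.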
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
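# The ':2' suffix in 'P n -3 :2' marks origin choice 2 of the
# International Tables (origin on a centre of inversion); in this
# setting the inversion-related operations above carry negative
# translation numerators such as (-1/2, -1/2, 0), again understood
# modulo lattice translations.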
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
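# Each symmetry operation below is stored as a triple
# (rot, trans_num, trans_den): a 3x3 rotation matrix plus a fractional
# translation trans_num/trans_den, acting on fractional coordinates as
# x -> rot.x + trans_num/trans_den. The helper below is an illustrative
# sketch added for clarity (not part of the generated tables); it assumes
# N is a numpy-compatible module, as used throughout this file.
def _apply_symmetry_op(op, point):
    rot, trans_num, trans_den = op
    # Multiply by 1.0 so the integer numerator/denominator divide as floats.
    return N.dot(rot, point) + (1.0 * trans_num) / trans_den
# e.g. _apply_symmetry_op((rot, trans_num, trans_den), N.array([0.1, 0.2, 0.3]))

# Space group 204 (I m -3): symmetry operations.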
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
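# Space group 205 (P a -3): symmetry operations.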
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
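# Space group 206 (I a -3): symmetry operations.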
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
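# Space group 207 (P 4 3 2): symmetry operations.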
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
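# Space group 208 (P 42 3 2): symmetry operations.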
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
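# Space group 209 (F 4 3 2): symmetry operations.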
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
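# Begin the next space group's symmetry operations.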
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
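# Space group 211, 'I 4 3 2': symmetry operations (same triple format as above).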
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
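# Space group 212, 'P 43 3 2': symmetry operations.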
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
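# Space group 213, 'P 41 3 2': symmetry operations.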
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
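# Space group 214, 'I 41 3 2': symmetry operations.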
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
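# Space group 215, 'P -4 3 m': symmetry operations.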
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
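# Space group 216, 'F -4 3 m': symmetry operations.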
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
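# Sketch of how each (rot, trans_num, trans_den) triple below acts on a
# fractional coordinate x: x_new = rot.dot(x) + trans_num / trans_den,
# taken modulo the lattice. For example, with the identity rotation and a
# (1/2, 1/2, 1/2) translation:
#   N.identity(3).dot([0.1, 0.2, 0.3]) + N.array([1, 1, 1]) / N.array([2., 2., 2.])
#   -> [0.6, 0.7, 0.8]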
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
import numpy as np
import scipy as sp
def sim_state_eq( A, B, xi, U):
"""This function caclulates the trajectory for the network given our model
if there are no constraints, and the target state is unknown, using the
control equation precess x(t+1) = Ax(t) + BU(t). x(t) is the state vector, A is
the adjacency matrix, U(t) is the time varying input as specified by the
user, and B selects the control set (stimulating electrodes)
Args:
A : NxN state matrix (numpy array), where N is the number of nodes in your
network (for example, a structural connectivity matrix
constructed from DTI). A should be stable to prevent
uncontrolled trajectories.
B : NxN input matrix (numpy array), where N is the number of nodes. B
selects where you want your input energy to be applied to.
For example, if B is the Identity matrix, then input energy
will be applied to all nodes in the network. If B is a
matrix of zeros, but B(1,1) = 1. then energy will only be
applied at the first node.
xi : Nx1 initial state (numpy array) of your system where N is the number of
nodes. xi MUST have N rows.
U : NxT matrix of Energy (numpy array), where N is the number of nodes
and T is the number of
time points. For example, if you want to simulate the
trajectory resulting from stimulation, U could have
log(StimFreq)*StimAmp*StimDur as every element. You can
also enter U's that vary with time
Returns:
x : x is the NxT trajectory (numpy array) that results from simulating
x(t+1) = Ax(t) + Bu(t) the equation with the parameters
above.
@author JStiso
June 2017
"""
# Simulate trajectory
T = np.size(U,1)
N = np.size(A,0)
# initialize x
x = np.zeros((N, T))
xt = xi
for t in range(T):
x[:,t] = np.reshape(xt, N) # annoying python 1d array thing
xt_1 = np.matmul(A,xt) + np.matmul(B,np.reshape(U[:,t],(N,1) ))# state equation
xt = xt_1
return x
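# Minimal usage sketch (illustrative toy values, not from the original module):
# a small, stable 3-node network with input applied only at node 0.
def _demo_sim_state_eq():
    A = 0.1 * np.eye(3) + 0.05 * np.ones((3, 3))  # spectral radius 0.25 -> stable
    B = np.diag([1.0, 0.0, 0.0])                  # stimulate node 0 only
    xi = np.ones((3, 1))                          # initial state, must be Nx1
    U = 0.5 * np.ones((3, 5))                     # constant input over 5 time points
    return sim_state_eq(A, B, xi, U)              # returns a (3, 5) trajectory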
def optimal_energy(A, T, B, x0, xf, rho, S):
"""This is a python adaptation of matlab code originally written by <NAME> and <NAME>
compute optimal inputs/trajectories for a system to transition between two states
<NAME> September 2017
Args:
A: (NxN numpy array) Structural connectivity matrix
B: (NxN numpy array) Input matrix: selects which nodes to put input into. Define
so there is a 1 on the diagonal of elements you want to add input to,
and 0 otherwise
        S: (NxN numpy array) Selects nodes whose distance you want to constrain. Define so
that there is a 1 on the diagonal of elements you want to
constrain, and a zero otherwise
T: (float) Time horizon: how long you want to control for. Too large will give
large error, too short will not give enough time for control
rho: (float) weights energy and distance constraints. Small rho leads to larger
energy
Returns:
X_opt: (TxN numpy array)
The optimal trajectory through state space
U_opt: (TxN numpy array)
The optimal energy
n_err: (float)
the error associated with this calculation. Errors will be larger when B is not identity,
and when A is large. Large T and rho will also tend to increase the error
-------------- Change Log -------------
JStiso April 2018
Changed S to be an input, rather than something defined internally
<NAME> January 2021
Changed the forward propagation of states to matrix exponential to
avoid reliance on MATLAB toolboxes. Also changed definition of expanded
input U to save time by avoiding having to resize the matrix.
Also changed the initialization of U_opt for the same reason.
JStiso 2021
Translated to Python
"""
n = np.shape(A)[1]
# state vectors to float if they're bools
if type(x0[0]) == np.bool_:
x0 = x0.astype(float)
if type(xf[0]) == np.bool_:
xf = xf.astype(float)
Sbar = np.eye(n) - S
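    # Build the joint state/costate system from the Pontryagin optimality
    # conditions: top block row [A, -B B^T/(2*rho)], bottom block row [-2*S, -A^T].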
Atilde = np.concatenate((np.concatenate((A, np.dot(-B,B.T)/(2*rho)), axis=1),
np.concatenate((-2*S, -A.T), axis=1)), axis=0)
M = sp.linalg.expm(Atilde*T)
M11 = M[0:n,0:n]
M12 = M[0:n,n:]
M21 = M[n:,0:n]
M22 = M[n:,n:]
    N = np.linalg.solve(Atilde, (M - np.eye(np.shape(Atilde)[0])))
import numpy as np
import itertools
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import os
# raise the TensorFlow log level to suppress info/warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def test1():
assert 1 == 0
def test2():
a = tf.constant(np.array([1, 0, 0, 1]), dtype=tf.float32)
result = tf.constant(np.array([3, 0, 0, 3]), dtype=tf.float32)
assert tf.reduce_all(tf.equal(3 * a, result))
def test3():
a = tf.constant(np.array([1, 2, 3, 4]), dtype=tf.float32)
state_diag = tf.linalg.diag(a)
result = tf.constant(np.array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]]), dtype=tf.float32)
assert tf.reduce_all(tf.equal(state_diag, result))
def test4():
a = tf.constant(np.array([1, 2, 3, 4]), dtype=tf.float32)
    b = tf.constant(np.array([3, 1, 3, 1]), dtype=tf.float32)
import numpy as np
import tensorflow as tf
def build_laplace(n,boundary='0'):
if n==1:
return np.zeros((1,1),dtype=np.float32)
d1 = -2 * np.ones((n,),dtype=np.float32)
d2 = 1 * np.ones((n-1,),dtype=np.float32)
lap = np.zeros((n,n),dtype=np.float32)
lap[range(n),range(n)]=d1
lap[range(1,n),range(n-1)]=d2
lap[range(n-1),range(1,n)]=d2
if boundary=='0':
lap[0,0]=lap[n-1,n-1]=-1
return lap
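# Example: build_laplace(3) with the default '0' boundary yields
#   [[-1.,  1.,  0.],
#    [ 1., -2.,  1.],
#    [ 0.,  1., -1.]]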
def lower_build_laplace(n,boundary='0'):
if n==1:
return np.zeros((1,1),dtype=np.float32)
d1 = -1 * np.ones((n,),dtype=np.float32)
d2 = np.ones((n-1,),dtype=np.float32)
lap = np.zeros((n,n),dtype=np.float32)
lap[range(n),range(n)]=d1
lap[range(1,n),range(n-1)]=d2
if boundary=='0':
lap[0,0]=0
return lap
def upper_build_laplace(n,boundary='0'):
if n==1:
return np.zeros((1,1),dtype=np.float32)
d1 = -1 * np.ones((n,),dtype=np.float32)
d2 = np.ones((n-1,),dtype=np.float32)
lap = np.zeros((n,n),dtype=np.float32)
lap[range(n),range(n)]=d1
lap[range(n-1),range(1,n)]=d2
if boundary=='0':
lap[n-1,n-1]=0
return lap
def eigenstate(n,i):
lap = build_laplace(n)
ew, ev = np.linalg.eigh(lap)
res = ev[:,i]
return res
def coeffs(n):
a = np.zeros((n,))
a[-1]=1
coefs = []
for i in range(n):
coefs.append(eigenstate(n,i)@a)
print(coefs)
@tf.function
def coup_nonlin_osc_batch( x, control):
n_osc = x.shape[1]//2
# natural time evo
a1 = np.array([[0,1],[-1,0]],dtype=np.float32)
a2 = np.eye(n_osc,dtype=np.float32)
A = np.kron(a1,a2)
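    # Kron layout: the state is ordered [x_1..x_n, v_1..v_n], so a1 couples the
    # position and velocity blocks while a2 keeps the oscillators independent.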
x_dot1 = tf.tensordot(x,A,axes = (1,1))
# linear interaction term
interaction_strength = 0.0
b1 = np.array([[0,0],[1,0]],dtype=np.float32)
b2 = build_laplace(n_osc)
B = interaction_strength * np.kron(b1,b2)
x_dot2 = tf.tensordot(x,B, axes=(1, 1))
# control term
control_vector = np.zeros((n_osc,),dtype=np.float32)
control_vector[-1] = 3.0
c1 = np.array([0,1],dtype=np.float32)
c2 = control_vector
C = np.kron(c1,c2)
x_dot3 = tf.tensordot(control,C, axes=0)
# cubic interaction term
cubic_interaction_strength = 1.0
d1 = np.array([[0,0],[1,0]],dtype=np.float32)
d2a = upper_build_laplace(n_osc)
d2b = lower_build_laplace(n_osc)
    Da = cubic_interaction_strength * np.kron(d1,d2a)
"""Script to apply different linear regression methods with resampling to data."""
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from regression_analysis.fit_model import linear_regression
from regression_analysis.utils import franke
def create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate, num_min_batch, epochs):
"""Create array filled with zeros."""
return np.zeros([len(order), len(num_points), len(noise_var), len(test_ratios), len(ridge_lambda), len(lasso_lambda), len(n_boots),
len(k_folds), len(learn_rate), len(num_min_batch), len(epochs)])
def apply_regression(order, num_points, noise_var, test_ratio_array=np.zeros(1), reg_type="ols", ridge_lambda=np.ones(1),
lasso_lambda=np.ones(1), n_boots=np.ones(1, dtype=int), k_folds=np.ones(1, dtype=int)):
"""
Apply specified linear regression method to data with given parameters
:param order: order of polynomial which will be fitted
:param num_points: number of points to be used for the simulation
:param noise_var: noise variance to be used in Franke function
    :param test_ratio_array: fractions of the data set held out for testing
:param reg_type: fitting method to be used
:param ridge_lambda: lambda for ridge regression
:param lasso_lambda: lambda for lasso regression
:param n_boots: number of bootstraps
:param k_folds: number of folds to be used in cross-validation
"""
# applies regression for multiple parameter combos
train_MSE_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
test_MSE_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
train_R2_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
test_R2_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
# bias in test set
test_bias_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
# variance in test set
test_var_arr = np.zeros([len(order), len(num_points), len(noise_var), len(test_ratio_array), len(ridge_lambda), len(lasso_lambda),
len(n_boots), len(k_folds)])
# Calculate statistical indicators for given regression type and different resampling methods
for points_ind, num in enumerate(num_points):
for noise_ind, var in enumerate(noise_var):
# Create data from Franke function
xx1, xx2, y = franke.create_data(num_points=num, noise_variance=var)
linear_reg = linear_regression.linear_regression2D(xx1, xx2, y)
for order_ind, ordr in enumerate(order):
for ratio_ind, test_ratio in enumerate(test_ratio_array):
if reg_type == "ols":
linear_reg.apply_leastsquares(order=ordr, test_ratio=test_ratio, reg_method="ols")
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0] = linear_reg.testvar
elif reg_type == "ols_bootstrap":
for boot_ind, n_boot in enumerate(n_boots):
linear_reg.apply_leastsquares_bootstrap(order=ordr, test_ratio=test_ratio,
n_boots=n_boot, reg_method="ols")
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0] = linear_reg.testvar
elif reg_type == "ols_crossvalidation":
# note test_ratio_array is of length one for crossvalidation. we don't need test ratio
for fold_ind, k_fold in enumerate(k_folds):
linear_reg.apply_leastsquares_crossvalidation(order=ordr, kfolds=k_fold, reg_method="ols")
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind] = linear_reg.testvar
elif reg_type == "ridge":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
linear_reg.apply_leastsquares(order=ordr, test_ratio=test_ratio, reg_method="ridge",
lmbda=ridge_lam)
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0] = linear_reg.testvar
elif reg_type == "ridge_bootstrap":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
for boot_ind, n_boot in enumerate(n_boots):
linear_reg.apply_leastsquares_bootstrap(order=ordr, test_ratio=test_ratio,
n_boots=n_boot, reg_method="ridge",
lmbda=ridge_lam)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0] = linear_reg.testvar
elif reg_type == "ridge_crossvalidation":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
for fold_ind, k_fold in enumerate(k_folds):
linear_reg.apply_leastsquares_crossvalidation(order=ordr, kfolds=k_fold, reg_method="ridge",
lmbda=ridge_lam)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind] = linear_reg.testvar
elif reg_type == "lasso":
for lasso_lam_ind, lasso_lam in enumerate(lasso_lambda):
linear_reg.apply_leastsquares(order=ordr, test_ratio=test_ratio, reg_method="scikit_lasso",
lmbda=lasso_lam)
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, 0] = linear_reg.testvar
elif reg_type == "lasso_bootstrap":
for lasso_lam_ind, lasso_lam in enumerate(lasso_lambda):
for boot_ind, n_boot in enumerate(n_boots):
linear_reg.apply_leastsquares_bootstrap(order=ordr, test_ratio=test_ratio,
n_boots=n_boot, reg_method="scikit_lasso",
lmbda=lasso_lam)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, boot_ind, 0] = linear_reg.testvar
elif reg_type == "lasso_crossvalidation":
for lasso_lam_ind, lasso_lam in enumerate(lasso_lambda):
for fold_ind, k_fold in enumerate(k_folds):
linear_reg.apply_leastsquares_crossvalidation(order=ordr, kfolds=k_fold, reg_method="scikit_lasso",
lmbda=lasso_lam)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, lasso_lam_ind, 0, fold_ind] = linear_reg.testvar
return train_MSE_arr, test_MSE_arr, train_R2_arr, test_R2_arr, test_bias_arr, test_var_arr
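# Usage sketch (illustrative parameter grids; all returned arrays are indexed
# by these grids in the order of the function signature):
# stats = apply_regression(order=[2, 3], num_points=[100], noise_var=[0.1],
#                          test_ratio_array=np.array([0.2]),
#                          reg_type="ols_bootstrap",
#                          n_boots=np.array([100], dtype=int))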
def apply_regression_sgd(order, num_points, noise_var, test_ratios=np.zeros(1), reg_type="ols", ridge_lambda=np.ones(1),
lasso_lambda=np.ones(1), n_boots=np.ones(1, dtype=int), k_folds=np.ones(1, dtype=int), learn_rate=np.ones(1),
num_min_batch=np.ones(1), epochs=np.ones(1)):
"""
Apply specified linear regression method with stochastic gradient descent to data with given parameters
:param order: order of polynomial which will be fitted
:param num_points: number of points to be used for the simulation
:param noise_var: noise variance to be used in Franke function
    :param test_ratios: fractions of the data set held out for testing
:param reg_type: fitting method to be used
:param ridge_lambda: lambda for ridge regression
:param lasso_lambda: lambda for lasso regression
:param n_boots: number of bootstraps
:param k_folds: number of folds to be used in cross-validation
:param learn_rate: learn rate for stochastic gradient descent
:param num_min_batch: size of mini batches for stochastic gradient descent
:param epochs: number of epochs for stochastic gradient descent
"""
# applies regression for multiple parameter combos
train_MSE_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
test_MSE_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
train_R2_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
test_R2_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
# bias in test set
test_bias_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
# variance in test set
test_var_arr = create_0(order, num_points, noise_var, test_ratios, ridge_lambda, lasso_lambda, n_boots, k_folds, learn_rate,
num_min_batch, epochs)
# Calculate statistical indicators for given regression type and different resampling methods
for points_ind, num in enumerate(num_points):
for noise_ind, var in enumerate(noise_var):
# Create data from Franke function
xx1, xx2, y = franke.create_data(num_points=num, noise_variance=var)
# Create regression object
linear_reg = linear_regression.linear_regression2D(xx1, xx2, y)
for order_ind, ordr in enumerate(order):
for ratio_ind, test_ratio in enumerate(test_ratios):
if reg_type == "ols_sgd":
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares(order=ordr, test_ratio=test_ratio, reg_method="ols", num_epoch=epoch,
learn_rate=learn_rat, num_min_batch=num_batch)
train_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
elif reg_type == "ols_bootstrap_sgd":
for boot_ind, n_boot in enumerate(n_boots):
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares_bootstrap(order=ordr, test_ratio=test_ratio, n_boots=n_boot,
reg_method="ols", num_epoch=epoch, learn_rate=learn_rat,
num_min_batch=num_batch)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
elif reg_type == "ols_crossvalidation_sgd":
# note test_ratio is of length one for crossvalidation. we don't need test ratio
for fold_ind, k_fold in enumerate(k_folds):
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares_crossvalidation(order=ordr, kfolds=k_fold, reg_method="ols",
num_epoch=epoch, learn_rate=learn_rat,
num_min_batch=num_batch)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, 0, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
elif reg_type == "ridge_sgd":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares(order=ordr, test_ratio=test_ratio, reg_method="ridge",
lmbda=ridge_lam, num_epoch=epoch, learn_rate=learn_rat,
num_min_batch=num_batch)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
elif reg_type == "ridge_bootstrap_sgd":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
for boot_ind, n_boot in enumerate(n_boots):
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares_bootstrap(order=ordr, test_ratio=test_ratio, n_boots=n_boot,
reg_method="ridge", lmbda=ridge_lam, num_epoch=epoch,
learn_rate=learn_rat, num_min_batch=num_batch)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, boot_ind, 0, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
elif reg_type == "ridge_crossvalidation_sgd":
for ridge_lam_ind, ridge_lam in enumerate(ridge_lambda):
for fold_ind, k_fold in enumerate(k_folds):
for epoch_ind, epoch in enumerate(epochs):
for learn_ind, learn_rat in enumerate(learn_rate):
for batch_ind, num_batch in enumerate(num_min_batch):
linear_reg.apply_leastsquares_crossvalidation(order=ordr, kfolds=k_fold, reg_method="ridge",
lmbda=ridge_lam, num_epoch=epoch,
learn_rate=learn_rat, num_min_batch=num_batch)
train_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.trainMSE
test_MSE_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testMSE
train_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.trainR2
test_R2_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testR2
test_bias_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testbias
test_var_arr[
order_ind, points_ind, noise_ind, ratio_ind, ridge_lam_ind, 0, 0, fold_ind, learn_ind, batch_ind, epoch_ind] = linear_reg.testvar
return train_MSE_arr, test_MSE_arr, train_R2_arr, test_R2_arr, test_bias_arr, test_var_arr
def get_data_path():
"""
Get the directory from which the script is executed to load the data correctly. This is especially important for
the execution in a jupyter notebook.
"""
current_path = os.getcwd()
current_directory = current_path[current_path.rindex(os.sep) + 1:]
if current_directory == 'examples':
data_path = 'data_linear_regression/'
elif current_directory == 'regression_analysis':
data_path = 'examples/data_linear_regression/'
elif current_directory == 'CompSci-Project-1':
data_path = 'regression_analysis/examples/data_linear_regression/'
else:
raise Exception('This script is not in the correct directory.')
return data_path
def get_data_path_sgd():
"""
    Get the directory from which the script is executed to load the data correctly. This is especially important for
the execution in a jupyter notebook.
"""
current_path = os.getcwd()
current_directory = current_path[current_path.rindex(os.sep) + 1:]
if current_directory == 'examples':
data_path = 'data_linear_regression_sgd/'
elif current_directory == 'regression_analysis':
data_path = 'examples/data_linear_regression_sgd/'
elif current_directory == 'CompSci-Project-1':
data_path = 'regression_analysis/examples/data_linear_regression_sgd/'
else:
raise Exception('This script is not in the correct directory.')
return data_path
def get_data_statistic(data_path, statistic, method):
"""Load file with given statistical indicator and method."""
file_name = statistic.replace(' ', '_') + method + '.npy'
return np.load(data_path + file_name)
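# A minimal usage sketch (not in the original script; the "_ols" method suffix
# is an assumption and must match the file names written by the training run):
def _demo_load_statistic():
    stats = get_data_statistic(get_data_path(), "test MSE", "_ols")
    print("loaded statistic array with shape", stats.shape)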
def plot_stat(ratio=0.1, num=100, stat="test MSE", method="ols", n_boot=1000, k_fold=1000, ridge_lmb=122.0, lasso_lmb=112.2):
"""
Create heatmap for given statistical indicator and sampling method
:param ratio: ratio of the dataset to be used for testing
:param num: length of dataset
:param stat: statistical indicator
:param method: resampling method
:param n_boot: number of times bootstrap is performed if method=*_bootstrap
:param k_fold: number of folds for cross-validation if method=*_crossvalidation
:param ridge_lmb: lambda for ridge regression
:param lasso_lmb: lambda for lasso regression
"""
# Path to example data
data_path = get_data_path()
# Load data
order = np.load(data_path + "order.npy")
num_points = np.load(data_path + "num_points.npy")
noise_var = np.load(data_path + "noise_var.npy")
test_ratio = np.load(data_path + "test_ratio.npy")
ridge_lambda = np.load(data_path + "ridge_lambda.npy")
    k_folds = np.load(data_path + "k_folds.npy")
import numpy as np
import cv2
import random
import time
import math
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from matplotlib.pyplot import imread, imsave
import os
import sys
from PIL import Image
import argparse
args = argparse.ArgumentParser(description='the option of the degradation of EyeQ')
args.add_argument('--degrade_dir', type=str, default='', help='degrade EyeQ dir')
args.add_argument('--gt_dir', type=str, default='', help='high quality cropped image dir')
args.add_argument('--output_dir', type=str, default='./temp', help='degraded output dir')
args.add_argument('--mask_dir', type=str, default='./temp', help='mask output dir')
args = args.parse_args()
'''
===== Gen Mask ====
'''
def _get_center_radius_by_hough(mask):
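    # minDist=1000 px between circle centres effectively limits detection to one dominant circle: the retina boundary.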
circles = cv2.HoughCircles((mask*255).astype(np.uint8),cv2.HOUGH_GRADIENT,1,1000,param1=5,param2=5,minRadius=min(mask.shape)//4, maxRadius=max(mask.shape)//2+1)
center = circles[0,0,:2]
radius = circles[0,0,2]
return center, radius
def _get_circle_by_center_bbox(shape,center,bbox,radius):
center_mask=np.zeros(shape=shape).astype('uint8')
tmp_mask=np.zeros(shape=bbox[2:4])
center_tmp=(int(center[0]),int(center[1]))
center_mask=cv2.circle(center_mask,center_tmp[::-1],int(radius),(1),-1)
return center_mask
def get_mask(img):
if img.ndim ==3:
g_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
elif img.ndim == 2:
g_img = img.copy()
else:
raise RuntimeError
h,w = g_img.shape
shape=g_img.shape[0:2]
g_img = cv2.resize(g_img,(0,0),fx = 0.5,fy = 0.5)
tg_img=cv2.normalize(g_img, None, 0, 255, cv2.NORM_MINMAX)
tmp_mask=get_mask_BZ(tg_img)
center, radius = _get_center_radius_by_hough(tmp_mask)
#resize back
center = [center[1]*2,center[0]*2]
radius = int(radius*2)
s_h = max(0,int(center[0] - radius))
s_w = max(0, int(center[1] - radius))
bbox = (s_h, s_w, min(h-s_h,2 * radius), min(w-s_w,2 * radius))
tmp_mask=_get_circle_by_center_bbox(shape,center,bbox,radius)
return tmp_mask,bbox,center,radius
def mask_image(img,mask):
img[mask<=0,...]=0
return img
def supplemental_black_area(img,border=None):
image=img
if border is None:
h,v=img.shape[0:2]
max_l=max(h,v)
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
border=(int(max_l/2-h/2),int(max_l/2-h/2)+h,int(max_l/2-v/2),int(max_l/2-v/2)+v,max_l)
else:
max_l=border[4]
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
image[border[0]:border[1],border[2]:border[3],...]=img
return image,border
def remove_back_area(img,bbox=None,border=None):
image=img
if border is None:
        border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=int)
image=image[border[0]:border[1],border[2]:border[3],...]
return image,border
def get_mask_BZ(img):
if img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
    threshold = np.mean(gray_img)/3-7
    _, mask = cv2.threshold(gray_img, max(0, threshold), 1, cv2.THRESH_BINARY)
nn_mask = np.zeros((mask.shape[0]+2,mask.shape[1]+2),np.uint8)
new_mask = (1-mask).astype(np.uint8)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (0,0), (0), cv2.FLOODFILL_MASK_ONLY)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (new_mask.shape[1]-1,new_mask.shape[0]-1), (0), cv2.FLOODFILL_MASK_ONLY)
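    # Flood-filling the inverted mask from two opposite corners clears the outer background;
    # whatever remains of new_mask marks interior holes, which are added back into the mask below.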
mask = mask + new_mask
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
mask = cv2.erode(mask, kernel)
mask = cv2.dilate(mask, kernel)
return mask
def preprocess(img):
mask, bbox, center, radius = get_mask(img)
r_img = mask_image(img, mask)
# r_img, r_border = remove_back_area(r_img, bbox=bbox)
# mask, _ = remove_back_area(mask, border=r_border)
#r_img, sup_border = supplemental_black_area(r_img)
#mask, _ = supplemental_black_area(mask, border=sup_border)
print(r_img.shape)
print(mask.shape)
return r_img, (mask * 255).astype(np.uint8)
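# Minimal usage sketch (hypothetical file name; assumes an RGB fundus image):
#   img = np.asarray(Image.open('fundus.jpg').convert('RGB'))
#   masked_img, mask = preprocess(img)
#   imsave(os.path.join(args.output_dir, 'fundus_masked.jpg'), masked_img)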
'''
===== Degrade ====
'''
def DE_COLOR(img, brightness=0.3, contrast=0.4, saturation=0.4):
"""Randomly change the brightness, contrast and saturation of an image"""
if brightness > 0:
brightness_factor = random.uniform(max(0.0, 1.0 - brightness), 1.0 + brightness - 0.1) # brightness factor
img = F.adjust_brightness(img, brightness_factor)
if contrast > 0:
contrast_factor = random.uniform(max(0.0, 1.0 - contrast), 1.0 + contrast) # contrast factor
img = F.adjust_contrast(img, contrast_factor)
if saturation > 0:
saturation_factor = random.uniform(max(0.0, 1.0 - saturation), 1.0 + saturation) # saturation factor
img = F.adjust_saturation(img, saturation_factor)
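    # NOTE: `transform` is assumed to be a torchvision transform (e.g. transforms.ToTensor())
    # defined elsewhere in the original script; it converts the PIL image to a tensor.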
img = transform(img)
img = img.numpy()
color_params = {}
color_params['brightness_factor'] = brightness_factor
color_params['contrast_factor'] = contrast_factor
color_params['saturation_factor'] = saturation_factor
return img, color_params
def DE_HALO(img, h, w, brightness_factor, center=None, radius=None):
'''
    Simulate 'ring-like' halo noise in a fundus image.
:param weight_r/weight_g/weight_b: Designed to simulate 3 kinds of halo noise shown in Kaggle dataset.
:param center_a/center_b: Position of each circle which is simulated the ringlike shape
:param dia_a/dia_b: Size of each circle which is simulated the ringlike noise
:param weight_hal0: Weight of added halo noise color
:param sigma: Filter size for final Gaussian filter
'''
weight_r = [251 / 255, 141 / 255, 177 / 255]
weight_g = [249 / 255, 238 / 255, 195 / 255]
weight_b = [246 / 255, 238 / 255, 147 / 255]
# num
if brightness_factor >= 0.2:
num = random.randint(1, 2)
else:
num = random.randint(0, 2)
    w0_a = random.randint(w // 2 - w // 8, w // 2 + w // 8)
    h0_a = random.randint(h // 2 - h // 8, h // 2 + h // 8)
center_a = [w0_a, h0_a]
wei_dia_a = 0.75 + (1.0 - 0.75) * random.random()
dia_a = min(h, w) * wei_dia_a
Y_a, X_a = np.ogrid[:h, :w]
dist_from_center_a = np.sqrt((X_a - center_a[0]) ** 2 + (Y_a - center_a[1]) ** 2)
circle_a = dist_from_center_a <= (int(dia_a / 2))
mask_a = np.zeros((h, w))
mask_a[circle_a] = np.mean(img) # np.multiply(A[0], (1 - t))
center_b = center_a
Y_b, X_b = np.ogrid[:h, :w]
    dist_from_center_b = np.sqrt((X_b - center_b[0]) ** 2 + (Y_b - center_b[1]) ** 2)
import warnings
from logging import getLogger
from typing import Callable, Union, Iterable, Tuple, Sequence
import numpy as np
from matplotlib.axes import Axes
from matplotlib.colors import to_rgba
from pandas import DataFrame
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from seaborn.utils import iqr, remove_na
from seaborn.categorical import _CategoricalPlotter, _CategoricalScatterPlotter
__all__ = ["half_violinplot", "stripplot", "distplot"]
log = getLogger(__name__)
class _StripPlotter(_CategoricalScatterPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(
self,
x,
y,
hue,
data,
order,
hue_order,
jitter,
dodge,
orient,
color,
palette,
width,
move,
):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.dodge = dodge
self.width = width
self.move = move
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and dodge:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None or not self.dodge:
if self.hue_names is None:
                    hue_mask = np.ones(group_data.size, bool)
else:
hue_mask = np.array(
[h in self.hue_names for h in self.plot_hues[i]],
                        bool,
)
# Broken on older numpys
# hue_mask = np.in1d(self.plot_hues[i], self.hue_names)
strip_data = group_data[hue_mask]
# Plot the points in centered positions
cat_pos = self.move + np.ones(strip_data.size) * i
cat_pos += self.jitterer(len(strip_data))
kws.update(c=self.point_colors[i][hue_mask])
if self.orient == "v":
ax.scatter(cat_pos, strip_data, **kws)
else:
ax.scatter(strip_data, cat_pos, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
strip_data = group_data[hue_mask]
# Plot the points in centered positions
center = i + offsets[j]
cat_pos = self.move + np.ones(strip_data.size) * center
cat_pos += self.jitterer(len(strip_data))
kws.update(c=self.point_colors[i][hue_mask])
if self.orient == "v":
ax.scatter(cat_pos, strip_data, **kws)
else:
ax.scatter(strip_data, cat_pos, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.add_legend_data(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _Half_ViolinPlotter(_CategoricalPlotter):
def __init__(
self,
x,
y,
hue,
data,
order,
hue_order,
bw,
cut,
scale,
scale_hue,
alpha,
gridsize,
width,
inner,
split,
dodge,
orient,
linewidth,
color,
palette,
saturation,
offset,
):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
self.dodge = dodge
self.offset = offset
self.alpha = alpha
if inner is not None:
if not any(
[
inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point"),
]
):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) < 2:
msg = "There must be at least two hue levels to use `split`.'"
raise ValueError(msg)
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.0]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.0]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.0]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
                        density[i].append(np.array([1.0]))
import sys
import json
import pickle
import random
from pathlib import Path
import csv
import hashlib
import numpy as np
import open3d as o3d
from loguru import logger as logging
from tools.tests.ray_ground_filter import RayGroundFilter
from tools.tests.object_utils import Box, ObjectWithBox, ObjectManipulator, VisualizerSequence
# set seed for debug
seed = random.randrange(sys.maxsize)
# seed = 1000
random.seed(seed)
logging.info('Random seed: {}'.format(seed))
class SceneGenerator(object):
def __init__(self, cloud_data_folder, output_folder):
self.cloud_data_folder = cloud_data_folder
self.output_folder = output_folder
self.output_cloud_file = None # path to save output cloud .bin file
self.output_label_file = None # path to save output label .txt file
self.label_data_dict = None # label data dict of the original scene
self.scene_labels = None # label data dict of the generated scene
self.output_file_name = None
self.cloud = None # cloud as numpy ndarray type
self.pcd = None # cloud as Open3d type
self.scene_points = None # generated scene cloud as numpy ndarray type
self.point_distance_buffer = None
self.lidar_mask_buffer = None
self.selected_objects = list()
self.labels_of_objects = list()
self.labels_of_valid_objects = list()
self.object_manipulator = None
self.create_object_manipulator()
# num of each classes in a scene
self.num_of_objects = {'Car': 15, 'Truck': 5, 'Tricar': 5, 'Cyclist': 10, 'Pedestrian': 10}
# radial distance range of each classes in a scene, can be set as absolute or relative
# -- absolute
# self.range_of_distances = {'Car': [5.0, 100.0],
# 'Truck': [8.0, 120.0],
# 'Tricar': [5.0, 80.0],
# 'Cyclist': [5.0, 80.0],
# 'Pedestrian': [5.0, 60.0]}
# -- relative
self.range_of_distances = {'Car': [-10.0, 10.0],
'Truck': [-10.0, 10.0],
'Tricar': [-10.0, 10.0],
'Cyclist': [-10.0, 10.0],
'Pedestrian': [-10.0, 10.0]}
# additional random rotation angle range applied to each object
self.additional_rotation_range = 30.0 # deg
# elevation angle range set to each object to control its height
self.elevation_angle_range = 2.0 # deg
def create_object_manipulator(self):
# configure the object manipulator and the transform between the original lidar frame and current frame
origin_lidar_rotation = [3.13742, -3.1309, 3.14101]
origin_lidar_location = [-2.87509, -0.00462392, 1.83632]
self.object_manipulator = ObjectManipulator()
self.object_manipulator.init_lidar_transform(origin_lidar_rotation, origin_lidar_location)
# configure lidar elevation angle distribution
lidar_elevation_file = 'test_data/VLS-128-Figure9-8-Azimuth Offsets by Elevation.csv'
azimuth_angle_increment = 0.2 # deg
ring_index = list()
elevation_angle = list()
with open(lidar_elevation_file, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_num = 0
for row in csvreader:
if line_num > 0:
ring_index.append(int(row[0]))
elevation_angle.append(float(row[1]))
line_num += 1
self.object_manipulator.init_lidar_param(ring_index, elevation_angle, azimuth_angle_increment)
def remove_original_objects(self):
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(self.cloud[:, :3])
# -- iterate for each object
objs = self.label_data_dict['gts']
for p in objs:
# ignore DontCare objects
if p['class_name'] == 'DontCare':
continue
# construct 3d box
bbox = o3d.geometry.OrientedBoundingBox(
center=p['location'],
R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(p['rotation']),
extent=p['dimension'],
)
# crop the object points
object_points = self.pcd.crop(bbox)
# check if not empty
if np.asarray(object_points.points).shape[0] == 0:
continue
# remove the object with distance threshold
dist_threshold = 1e-3 # m, it should be very small, since ideally the min distance is 0
non_ground_to_object_distance = np.asarray(self.pcd.compute_point_cloud_distance(object_points))
remain_points_mask = non_ground_to_object_distance > dist_threshold
remain_points_indices = np.nonzero(remain_points_mask)
self.pcd = self.pcd.select_down_sample(remain_points_indices[0])
def random_select_candidate_objects(self, objects_data):
class_num = len(objects_data)
candidate_object_num = 0
added_object_points_for_collision_test = o3d.geometry.PointCloud()
logging.info('Random selecting candidate objects...')
for i in range(class_num):
class_name = objects_data[i]['class_name']
samples = random.sample(objects_data[i]['objects'], self.num_of_objects[class_name])
# randomly place the object with polar coordinates
for sample in samples:
# manipulate the object
self.object_manipulator.init_object(sample, class_name)
# -- first mirror the object points
self.object_manipulator.mirror_object_points()
# -- then rotate and move in lidar frame
self.object_manipulator.lidar_rotate_and_move_object(
rotation_z_angle=random.uniform(0.0, 360.0),
radial_distance=random.uniform(self.range_of_distances[class_name][0],
self.range_of_distances[class_name][1]),
absolute_distance=False)
# -- then rotate and elevate itself
self.object_manipulator.self_rotate_and_elevate_object(
rotation_z_angle=random.uniform(-self.additional_rotation_range, self.additional_rotation_range),
elevation_angle=random.uniform(-self.elevation_angle_range, self.elevation_angle_range))
# -- then check if collision happens with previous boxes
if self.check_box_collision(self.object_manipulator.object.box3d,
added_object_points_for_collision_test):
continue
# -- finally resample with the lidar
if not self.object_manipulator.resample_by_lidar():
# if failed to get resampled object, skip
continue
# add object to list
self.selected_objects.append({'class_name': class_name, 'object_data': self.object_manipulator.object})
# debug
# object_pcd = o3d.geometry.PointCloud()
# object_pcd.points = o3d.utility.Vector3dVector(self.object_manipulator.object.cloud_points)
# o3d.visualization.draw_geometries([object_pcd])
# add label to the object
self.labels_of_objects.append(self.object_manipulator.get_object_label())
# update object id
candidate_object_num += 1
logging.info('Candidate object num: {}'.format(candidate_object_num))
def sort_by_distance(self):
object_list_for_sort = list()
for sample, label in zip(self.selected_objects, self.labels_of_objects):
# compute object's distance to lidar
location_homogeneous = np.append(sample['object_data'].box3d.location, 1)
location_in_lidar_frame = np.matmul(
self.object_manipulator.transform_current_to_origin_lidar, location_homogeneous)
distance_to_lidar = np.linalg.norm(location_in_lidar_frame[:2])
object_list_for_sort.append([sample, label, distance_to_lidar])
object_list_for_sort.sort(key=lambda object: object[2])
for i in range(len(object_list_for_sort)):
self.selected_objects[i] = object_list_for_sort[i][0]
self.labels_of_objects[i] = object_list_for_sort[i][1]
def add_object_to_scene(self, object, points_num_threshold=50):
object_points = object['object_data'].cloud_points.copy()
object_pcd = o3d.geometry.PointCloud()
object_pcd.points = o3d.utility.Vector3dVector(object_points[:, :3])
# transform the object points to lidar frame
# -- first recover translation from the location of 3D box
object_pcd.translate(object['object_data'].box3d.location)
# -- then apply the transformation to lidar frame
object_pcd.transform(self.object_manipulator.transform_current_to_origin_lidar)
# -- finally update points in numpy ndarray
object_points = np.asarray(object_pcd.points)
# add to all cloud with occlusion handling
# -- backup buffer in case not enough valid points
point_distance_buffer_backup = self.point_distance_buffer.copy()
valid_points_num = \
self.handle_occlusion(object_points, use_lidar_mask=False, update_lidar_mask=True)
if valid_points_num <= points_num_threshold:
# restore previous point_distance_buffer
self.point_distance_buffer = point_distance_buffer_backup
return valid_points_num
def check_box_collision(self, box3d, added_objects_points):
# construct open3d box
current_box = o3d.geometry.OrientedBoundingBox(
center=box3d.location,
R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(box3d.rotation),
extent=box3d.dimension,
)
# check with previously added box points
if np.asarray(added_objects_points.points).shape[0] > 0:
intersection_points = added_objects_points.crop(current_box)
if np.asarray(intersection_points.points).shape[0] > 0:
return True
# randomly generate points of current box
current_box_pcd = o3d.geometry.PointCloud()
step = 0.2
box_points = np.mgrid[-0.5 * box3d.dimension[0]:0.5 * box3d.dimension[0]:step,
-0.5 * box3d.dimension[1]:0.5 * box3d.dimension[1]:step,
-0.5 * box3d.dimension[2]:0.5 * box3d.dimension[2]:step].T.reshape(-1, 3)
current_box_pcd.points = o3d.utility.Vector3dVector(box_points)
# -- transform with box's location and rotation
transformation = np.eye(4)
transformation[:3, :3] = o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(box3d.rotation)
transformation[:3, 3] = box3d.location
current_box_pcd.transform(transformation)
# check with ego box
ego_box = o3d.geometry.OrientedBoundingBox(
center=self.object_manipulator.transform_origin_lidar_to_current[:3, 3],
R=self.object_manipulator.transform_origin_lidar_to_current[:3, :3],
extent=np.array([4.5, 1.8, 1.6]) * 1.2,
)
# -- crop and check if intersecting
intersection_points = current_box_pcd.crop(ego_box)
if np.asarray(intersection_points.points).shape[0] > 0:
return True
# debug
# current_box_pcd.paint_uniform_color([1.0, 0.0, 0.0])
# added_objects_points.paint_uniform_color([0.0, 1.0, 0.0])
# o3d.visualization.draw_geometries([current_box_pcd, added_objects_points])
# add points of current box to previous cloud
added_objects_points += current_box_pcd
return False
def handle_occlusion(self, points, use_lidar_mask=False, update_lidar_mask=False):
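        # Acts as a polar z-buffer: each (azimuth, elevation) ray keeps the first point written to it,
        # and objects are processed near-to-far, so closer points occlude farther ones.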
azimuth_angle_start = -np.pi
XYZ_range_distances = np.linalg.norm(points[:, :3], axis=1)
XY_range_distances = np.linalg.norm(points[:, :2], axis=1)
azimuth_angles = np.arctan2(points[:, 1], points[:, 0])
elevation_angles = np.arctan2(points[:, 2], XY_range_distances)
valid_points_num = 0
for i in range(points.shape[0]):
# compute azimuth index
azimuth_index = \
np.floor((azimuth_angles[i] - azimuth_angle_start) /
np.radians(self.object_manipulator.lidar_azimuth_angle_increment)).astype('int')
# find elevation index
elevation_index = min(range(len(self.object_manipulator.lidar_elevation_angle)),
key=lambda j:
abs(self.object_manipulator.lidar_elevation_angle[j] -
np.degrees(elevation_angles[i])))
# update the distance if not masked and assigned yet
lidar_mask = False if not use_lidar_mask else self.lidar_mask_buffer[azimuth_index, elevation_index]
point_distance = self.point_distance_buffer[azimuth_index, elevation_index]
if not lidar_mask and point_distance < 0:
# update distance
self.point_distance_buffer[azimuth_index, elevation_index] = XYZ_range_distances[i]
if update_lidar_mask:
# update mask
self.update_lidar_mask(self.lidar_mask_buffer, azimuth_index, elevation_index, mask_window_size=5)
# increase valid num
valid_points_num += 1
return valid_points_num
def update_lidar_mask(self, lidar_mask_buffer, azimuth_index, elevation_index, mask_window_size=3):
azimuth_angle_num = self.object_manipulator.lidar_azimuth_angle_num
min_azimuth_index = azimuth_index - mask_window_size
max_azimuth_index = azimuth_index + mask_window_size
azimuth_index_range = np.array(range(min_azimuth_index, max_azimuth_index)) % azimuth_angle_num
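        # The modulo wrap keeps the azimuth window continuous across the -pi/+pi seam.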
elevation_angle_num = self.object_manipulator.lidar_elevation_angle_num
min_elevation_index = elevation_index - mask_window_size
max_elevation_index = elevation_index + mask_window_size
elevation_index_range = np.array(range(min_elevation_index, max_elevation_index)) % elevation_angle_num
lidar_mask_buffer[np.ix_(azimuth_index_range, elevation_index_range)] = True
def add_background_points_to_scene(self, valid_object_indices, background_pcd):
# first remove points in objects' boxes
for valid_object_index in valid_object_indices:
# construct 3d box
box_info = self.selected_objects[valid_object_index]['object_data']
bbox = o3d.geometry.OrientedBoundingBox(
center=box_info.box3d.location,
R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(box_info.box3d.rotation),
extent=box_info.box3d.dimension,
)
# crop the object points
object_points = background_pcd.crop(bbox)
if np.asarray(object_points.points).shape[0] == 0:
# no points to remove
continue
# remove the object with distance threshold
dist_threshold = 1e-3 # m, it should be very small, since ideally the min distance is 0
non_ground_to_object_distance = np.asarray(background_pcd.compute_point_cloud_distance(object_points))
remain_points_mask = non_ground_to_object_distance > dist_threshold
remain_points_indices = np.nonzero(remain_points_mask)
background_pcd = background_pcd.select_down_sample(remain_points_indices[0])
# then transform to lidar frame
background_pcd.transform(self.object_manipulator.transform_current_to_origin_lidar)
# then handle the occlusion when add it to buffer using lidar mask created by objects
background_points = np.asarray(background_pcd.points)
valid_background_points_num = \
self.handle_occlusion(background_points, use_lidar_mask=True, update_lidar_mask=False)
logging.info('All background points: {}'.format(background_points.shape[0]))
logging.info('Valid background points in the scene: {}'.format(valid_background_points_num))
def generate_scene(self, label_data_dict, objects_data, with_ground=True):
self.label_data_dict = label_data_dict.copy()
# set output file
self.output_file_name = Path(label_data_dict['path']).name
# load cloud data
cloud_path = Path(self.cloud_data_folder).joinpath(self.label_data_dict['path']).as_posix()
cloud = np.fromfile(cloud_path, dtype=np.float32)
cloud = cloud.reshape((-1, 4))
# remove points far under or over ground
z_filt = np.logical_and(cloud[:, 2] > -10.0, cloud[:, 2] < 30.0)
self.cloud = cloud[z_filt, :]
# remove objects from the original cloud
self.remove_original_objects()
# randomly select objects
self.random_select_candidate_objects(objects_data)
# sort objects by radial distance, in order to add object from close to far
self.sort_by_distance()
# combine all objects
# -- construct a 2D polar buffer for occlusion handling
# -- azimuth angle range: -pi ~ pi
azimuth_angle_start = -np.pi
azimuth_angle_num = self.object_manipulator.lidar_azimuth_angle_num
elevation_angle_num = self.object_manipulator.lidar_elevation_angle_num
# -- create XYZ-range distance buffer for each ray
self.point_distance_buffer = np.full((azimuth_angle_num, elevation_angle_num), -1.0)
# -- create lidar mask for handling occlusion of objects. True means occupied by objects
self.lidar_mask_buffer = np.full((azimuth_angle_num, elevation_angle_num), False)
logging.info('Adding objects...')
valid_object_indices = list()
for i in range(len(self.selected_objects)):
object = self.selected_objects[i]
valid_points_num_threshold = 50
valid_points_num = self.add_object_to_scene(object, points_num_threshold=valid_points_num_threshold)
if valid_points_num > valid_points_num_threshold:
valid_label = self.labels_of_objects[i]
valid_label['num_points'] = valid_points_num
self.labels_of_valid_objects.append(valid_label)
# logging.info("Valid object #{}, points: {}".format(i, valid_points_num))
logging.info('Valid objects in the scene: {}'.format(len(self.labels_of_valid_objects)))
logging.info('Objects points in the scene: {}'.format(np.count_nonzero(self.point_distance_buffer > 0)))
if not with_ground:
# split ground and non-ground points
rgf = RayGroundFilter(refinement_mode='nearest_neighbor')
ground_points, non_ground_points = rgf.filter(np.asarray(self.pcd.points))
non_ground_pcd = o3d.geometry.PointCloud()
non_ground_pcd.points = o3d.utility.Vector3dVector(non_ground_points[:, :3])
# show results for debug
# o3d.visualization.draw_geometries([non_ground_pcd])
# add the non-ground points
self.add_background_points_to_scene(valid_object_indices, non_ground_pcd)
else:
# add all background points
self.add_background_points_to_scene(valid_object_indices, self.pcd)
# convert points from polar coordinates to XYZ
scene_points_list = list()
for azimuth_index, elevation_index in np.ndindex(self.point_distance_buffer.shape):
# ignore empty buffer
xyz_range_distance = self.point_distance_buffer[azimuth_index, elevation_index]
if xyz_range_distance < 0:
continue
# compute point coordinates
azimuth_angle = azimuth_angle_start + \
azimuth_index * np.radians(self.object_manipulator.lidar_azimuth_angle_increment)
elevation_angle = np.radians(self.object_manipulator.lidar_elevation_angle[elevation_index])
x = xyz_range_distance * np.cos(elevation_angle) * np.cos(azimuth_angle)
y = xyz_range_distance * np.cos(elevation_angle) * np.sin(azimuth_angle)
z = xyz_range_distance * np.sin(elevation_angle)
# add to buffer
scene_points_list.append([x, y, z])
self.scene_points = np.array(scene_points_list)
# transform back to current frame
scene_pcd = o3d.geometry.PointCloud()
scene_pcd.points = o3d.utility.Vector3dVector(self.scene_points[:, :3])
scene_pcd.transform(self.object_manipulator.transform_origin_lidar_to_current)
self.scene_points = np.asarray(scene_pcd.points)
def save_scene_cloud_to_file(self):
self.output_cloud_file = Path(self.output_folder).joinpath(self.output_file_name)
# add intensity
        scene_points_with_intensity = np.zeros((self.scene_points.shape[0], 4))
import numpy as np
from nnreg.model import Model
error_tolerance = 1e-10
def test_accuracy():
y_data = np.asarray([[0, 0, 1, 0]])
    y_pred = np.asarray([[1, 0, 0, 0]])
"""
Implementation of vanishing point detection methods.
"""
###########
# Imports #
###########
import cv2
import numpy as np
from abc import ABC, abstractmethod
from itertools import product
from skimage import color, feature, transform
from typing import List, Tuple, Union
##########
# Typing #
##########
Image = np.ndarray
Point = Tuple[float, float]
Line = Tuple[Point, Point]
Edgelets = Tuple[np.array]
Intermediate = List[Image]
###########
# Classes #
###########
class VPDetector(ABC):
"""
Abstract class used to define a vanishing point detector.
Each vanishing point detector must inherit this class.
"""
def __init__(self, export: bool = False):
"""
When the 'export' flag is True, the method returns the vanishing point
and all intermediate results.
When the 'export' flag is False, the method only returns the vanishing
point.
"""
super().__init__()
self.export = export
@abstractmethod
def detect(self, img: Image) -> Union[Point, Tuple[Point, Intermediate]]:
"""
Return the coordinates, in pixels, of the vanishing point of the image
        (and, optionally, all intermediate results).
"""
pass
class VPClassic(VPDetector):
"""
Implementation of a vanishing point detector based on classic methods
(Canny's algorithm and Hough transform).
"""
def __init__(self, export=False):
super().__init__(export)
# Specific
def _preprocess(self, img: Image) -> Image:
# Bilateral filtering
d = 10
sigma_color = 10
sigma_space = 100
img = cv2.bilateralFilter(
img,
d,
sigma_color,
sigma_space
)
return img
def _edges(self, img: Image) -> Image:
# Canny's algorithm
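        # Thresholds follow the common "auto-Canny" heuristic:
        # (1 - sigma) and (1 + sigma) times the median pixel intensity.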
median = np.median(img)
sigma = 0.33
lo_thresh = int(max(0, (1.0 - sigma) * median))
hi_thresh = int(min(255, (1.0 + sigma) * median))
filter_size = 3
img = cv2.Canny(
img,
lo_thresh,
hi_thresh,
apertureSize=filter_size,
L2gradient=True
)
return img
def _lines(self, img: Image) -> List[Line]:
h, w = img.shape
# Hough transform
rho = 1
theta = np.pi / 180
thresh = 10
min_line_length = w // 40
max_line_gap = w // 256
lines = cv2.HoughLinesP(
img,
rho,
theta,
thresh,
minLineLength=min_line_length,
maxLineGap=max_line_gap
)
# Get and filter end points
if lines is None:
return []
xy_thresh = w // 25
pts = []
for line in lines:
x1, y1, x2, y2 = line[0]
if abs(x1 - x2) > xy_thresh and abs(y1 - y2) > xy_thresh:
pts.append(((x1, y1), (x2, y2)))
return pts
def _intersections(self, lines: List[Line]) -> List[Point]:
if len(lines) == 0:
return []
inters = []
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
for i, l1 in enumerate(lines):
for l2 in lines[i + 1:]:
if not l1 == l2:
x_diff = (l1[0][0] - l1[1][0], l2[0][0] - l2[1][0])
y_diff = (l1[0][1] - l1[1][1], l2[0][1] - l2[1][1])
div = det(x_diff, y_diff)
if div == 0:
continue
d = (det(*l1), det(*l2))
x = det(d, x_diff) / div
y = det(d, y_diff) / div
inters.append((x, y))
return inters
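    # Worked example (hypothetical numbers): for lines ((0, 0), (2, 2)) and
    # ((0, 2), (2, 0)), div = -8 and the intersection is (1.0, 1.0).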
# Abstract interface
def detect(self, img):
"""
This implementation is largely inspired from:
- https://github.com/SZanlongo/vanishing-point-detection
"""
# Color convention
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Image dimensions
h, w, _ = img.shape
# Grid dimensions
grid_size = min(h, w) // 5
rows = (h // grid_size) + 1
cols = (w // grid_size) + 1
# Initialize guess (cell with most intersections)
max_inter = 0
guess = (0.0, 0.0)
# Process
preprocessed = self._preprocess(img)
edges = self._edges(preprocessed)
lines = self._lines(edges)
intersections = self._intersections(lines)
# Output
if self.export:
vp = img.copy()
# Find best cell
if len(intersections) > 0:
for i, j in product(range(cols), range(rows)):
left = i * grid_size
right = (i + 1) * grid_size
bottom = j * grid_size
top = (j + 1) * grid_size
if self.export:
cv2.rectangle(
vp,
(left, bottom),
(right, top),
(0, 0, 255),
2
)
n_inter = 0
for x, y in intersections:
if left < x < right and bottom < y < top:
n_inter += 1
if n_inter > max_inter:
max_inter = n_inter
guess = ((left + right) / 2, (bottom + top) / 2)
if self.export:
# Draw lines
img_lines = img.copy()
if len(lines) > 0:
for p1, p2 in lines:
cv2.line(img_lines, p1, p2, (0, 0, 255), 2)
# Draw best cell
gx, gy = guess
mgs = grid_size / 2
rx1 = int(gx - mgs)
ry1 = int(gy - mgs)
rx2 = int(gx + mgs)
ry2 = int(gy + mgs)
cv2.rectangle(vp, (rx1, ry1), (rx2, ry2), (0, 255, 0), 3)
return guess, [preprocessed, edges, img_lines, vp]
return guess
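# Usage sketch (hypothetical image path):
#   img = cv2.cvtColor(cv2.imread('road.jpg'), cv2.COLOR_BGR2RGB)
#   vp = VPClassic().detect(img)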
class VPEdgelets(VPDetector):
"""
Implementation of a vanishing point detector based on edgelets and RANSAC.
Inspired from:
- Auto-Rectification of User Photos, Chaudhury et al.
- https://github.com/chsasank/Image-Rectification
"""
def __init__(self, export=False):
super().__init__(export)
# Specific
def _edgelets(self, img: Image) -> Edgelets:
"""
Compute edgelets of an image.
"""
# Edges
gray = color.rgb2gray(img)
edges = feature.canny(gray, sigma=0.3)
# Lines
lines = transform.probabilistic_hough_line(
edges,
line_length=3,
line_gap=2
)
locations, directions, strengths = [], [], []
for p0, p1 in lines:
p0, p1 = np.array(p0), np.array(p1)
locations.append((p0 + p1) / 2)
directions.append(p1 - p0)
strengths.append(np.linalg.norm(p1 - p0))
# Normalize
locations = np.array(locations)
directions = np.array(directions)
strengths = np.array(strengths)
norm = np.linalg.norm(directions, axis=1)[:, np.newaxis]
directions = np.divide(directions, norm)
return locations, directions, strengths
def _lines(self, edgelets: Edgelets) -> np.array:
"""
Compute lines from edgelets.
"""
locations, directions, _ = edgelets
normals = np.zeros_like(directions)
normals[:, 0] = directions[:, 1]
normals[:, 1] = -directions[:, 0]
p = -np.sum(locations * normals, axis=1)
lines = np.concatenate((normals, p[:, np.newaxis]), axis=1)
return lines
def _votes(self, edgelets: Edgelets, model: np.array) -> int:
"""
Compute votes for each of the edgelet against a given vanishing point.
"""
threshold_inlier = 5
vp = model[:2] / model[2]
locations, directions, strengths = edgelets
est_directions = locations - vp
        dot_prod = np.sum(est_directions * directions, axis=1)
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test circuits and reference outputs for snapshot state instructions.
"""
from numpy import array, sqrt
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.providers.aer.extensions.snapshot import Snapshot
from qiskit.providers.aer.extensions.snapshot_statevector import *
def snapshot_state_circuits_deterministic(snapshot_label='snap',
snapshot_type='statevector',
post_measure=False):
"""Snapshot Statevector test circuits"""
circuits = []
num_qubits = 3
qr = QuantumRegister(num_qubits)
cr = ClassicalRegister(num_qubits)
regs = (qr, cr)
# State snapshot instruction acting on all qubits
snapshot = Snapshot(snapshot_label, snapshot_type, num_qubits)
# Snapshot |000>
circuit = QuantumCircuit(*regs)
if not post_measure:
circuit.append(snapshot, qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
if post_measure:
circuit.append(snapshot, qr)
circuits.append(circuit)
# Snapshot |111>
circuit = QuantumCircuit(*regs)
circuit.x(qr)
if not post_measure:
circuit.append(snapshot, qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
if post_measure:
circuit.append(snapshot, qr)
circuits.append(circuit)
return circuits
def snapshot_state_counts_deterministic(shots):
"""Snapshot Statevector test circuits reference counts."""
targets = []
# Snapshot |000>
targets.append({'0x0': shots})
# Snapshot |111>
targets.append({'0x7': shots})
return targets
def snapshot_state_pre_measure_statevector_deterministic():
"""Snapshot Statevector test circuits reference final statevector"""
targets = []
# Snapshot |000>
    targets.append(array([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex))
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 <NAME>
pySME is a Python script to run the R SME package
(https://cran.r-project.org/web/packages/sme/index.html). The SME package generates
smoothing-splines mixed-effects models from metabolomics data. This script
follows the methodology given by Berk et al. (2011) and uses bootstrapping to
approximate p-values. Running this script requires R with the SME package installed.
"""
import os
import numpy as np
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
import statsmodels.stats.multitest as smm
import time
import copy
import smeutils
smePack = importr('sme', lib_loc="C:/Users/user/Documents/R/win-library/3.6")
statsPack = importr('stats')
# Settings ====================================================================
# Input files
info = pd.read_csv('./sme_info.csv')
data = pd.read_csv('./sme_data.csv')
info_arr = np.array(info)
data_fid = np.array(data.columns)
data_arr = np.array(data)
selIdx = np.arange(len(data_fid))
# Parameters
RUN = True
N = 12 # Number of subjects
t_n = 4 # Number of time points
iplN = 100 # Number of interpolated time points
n_bootstrap = 500 # Number of bootstrap sampling
selIdx = selIdx[:] # List of metabolites to analyze
relative = False # Scale data to initial values
correctOutlier = False
SAVE = False
USEMEAN = True
# SME Parameters
ctra = "AICc" # Criteria
init_l_mc = 1e-8 # Initial lambda_mu
init_l_vc = 1e-8 # Initial lambda_v
init_l_mt = 5e-8 # Initial lambda_mu
init_l_vt = 5e-8 # Initial lambda_v
maxIter = 100000 # Maximum iteration
deltaEM = 1e-3 # Threshold for expectation maximization
deltaNM = 1e-3 # Threshold for nelder mead
normalizeTime = True
seed = 1234 # RNG seed
showFig = False # Flag to plot figures
figSize = (20,16) # Size of figures
plotLegend = False # Flag to plot legend
colorMap = 'viridis' # kwarg for colormap
plotSMEMeanOnly = False # Only plot SME mean trace
mergePlot = True # Merge multiple plots
plotHeatmap = False # Plot heatmap comparing two data groups
t = np.array([1,3,5,7])
iplT = np.linspace(1, 7, iplN)
iplTIdx = np.empty(t_n)
for i in range(t_n):
iplTIdx[i] = np.where(iplT == t[i])[0]
iplTIdx = iplTIdx.astype(int)
sel = np.array([data_fid[selIdx]]).flatten()
#==============================================================================
np.random.seed(seed) # Set seed
#==============================================================================
if relative:
data = smeutils.normalizeData(data, N, t_n, data_fid)
#==============================================================================
t0 = time.time()
fulldataRaw = pd.concat([info,data], axis=1)
fulldataRaw = fulldataRaw.astype('float64')
fulldata = copy.deepcopy(fulldataRaw)
fulldata = fulldata.drop(fulldata.index[16]) # ind 5 has an outlier
if correctOutlier:
fulldata = smeutils.correctOutlier(fulldata, sel, t, t_n)
# Initialize ==================================================================
grp0_f = fulldata[(fulldata.grp == 0)]['ind']
grp1_f = fulldata[(fulldata.grp == 1)]['ind']
grp0 = np.unique(fulldata[(fulldata.grp == 0)]['ind'])
grp1 = np.unique(fulldata[(fulldata.grp == 1)]['ind'])
pandas2ri.activate()
fd_ri = pandas2ri.py2ri(fulldata)
fd_rigrp0 = fd_ri.rx(fd_ri.rx2("grp").ro == 0, True)
fd_rigrp1 = fd_ri.rx(fd_ri.rx2("grp").ro == 1, True)
fd_rigrp0tme = fd_rigrp0.rx2("tme")
fd_rigrp0ind = fd_rigrp0.rx2("ind")
fd_rigrp1tme = fd_rigrp1.rx2("tme")
fd_rigrp1ind = fd_rigrp1.rx2("ind")
ys0mu = np.empty((len(sel), iplN))
ys1mu = np.empty((len(sel), iplN))
ys0vHat = np.empty((len(sel), len(grp0), iplN))
ys1vHat = np.empty((len(sel), len(grp1), iplN))
l2 = np.empty(len(sel))
se = np.empty(len(sel))
se0 = np.empty((len(sel), len(grp0)))
se1 = np.empty((len(sel), len(grp1)))
sem = np.empty(len(sel))
tval = np.empty(len(sel))
ys0v = np.empty((len(sel), len(grp0), t_n))
ys1v = np.empty((len(sel), len(grp1), t_n))
ys0eta = np.empty((len(sel), len(grp0), t_n))
ys1eta = np.empty((len(sel), len(grp1), t_n))
ys0mubs = np.empty((n_bootstrap, len(sel), iplN))
ys1mubs = np.empty((n_bootstrap, len(sel), iplN))
ys0vHatbs = np.empty((n_bootstrap, len(sel), len(grp0), iplN))
ys1vHatbs = np.empty((n_bootstrap, len(sel), len(grp1), iplN))
l2bs = np.empty((n_bootstrap, len(sel)))
sebs = np.empty((n_bootstrap, len(sel)))
se0bs = np.empty((n_bootstrap, len(sel), len(grp0)))
se1bs = np.empty((n_bootstrap, len(sel), len(grp1)))
sembs = np.empty((n_bootstrap, len(sel)))
tvalbs = np.empty((n_bootstrap, len(sel)))
pval = np.empty(len(sel))
t1 = time.time()
print(t1 - t0)
# SME =========================================================================
if RUN:
for m_i in range(len(sel)):
fd_rigrp0obj = fd_rigrp0.rx2(sel[m_i])
fd_rigrp1obj = fd_rigrp1.rx2(sel[m_i])
fit0 = smePack.sme(fd_rigrp0obj,
fd_rigrp0tme,
fd_rigrp0ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mc,
initial_lambda_v=init_l_mc,
normalizeTime=normalizeTime)
fit1 = smePack.sme(fd_rigrp1obj,
fd_rigrp1tme,
fd_rigrp1ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mt,
initial_lambda_v=init_l_vt,
normalizeTime=normalizeTime)
fit0coef = np.array(fit0.rx2('coefficients'))
fit1coef = np.array(fit1.rx2('coefficients'))
spl0mu = interpolate.CubicSpline(t, fit0coef[0], bc_type='natural')
ys0mu[m_i] = spl0mu(iplT)
spl1mu = interpolate.CubicSpline(t, fit1coef[0], bc_type='natural')
ys1mu[m_i] = spl1mu(iplT)
l2[m_i] = np.sqrt(np.trapz(np.square(ys0mu[m_i] - ys1mu[m_i]), x=iplT))
for g0 in range(len(grp0)):
spl0 = interpolate.CubicSpline(t, fit0coef[g0 + 1] + fit0coef[0], bc_type='natural')
ys0vHat[m_i][g0] = spl0(iplT)
ys0v[m_i][g0] = ys0mu[m_i][iplTIdx] - ys0vHat[m_i][g0][iplTIdx]
ys0eta[m_i][g0] = fulldataRaw.loc[fulldataRaw.ind == grp0[g0], sel[m_i]] - ys0vHat[m_i][g0][iplTIdx]
se0[m_i][g0] = np.trapz(np.square(ys0mu[m_i] - ys0vHat[m_i][g0]), x=iplT)
for g1 in range(len(grp1)):
spl1 = interpolate.CubicSpline(t, fit1coef[g1 + 1] + fit1coef[0], bc_type='natural')
ys1vHat[m_i][g1] = spl1(iplT)
ys1v[m_i][g1] = ys1mu[m_i][iplTIdx] - ys1vHat[m_i][g1][iplTIdx]
ys1eta[m_i][g1] = fulldataRaw.loc[fulldataRaw.ind == grp1[g1], sel[m_i]] - ys1vHat[m_i][g1][iplTIdx]
se1[m_i][g1] = np.trapz(np.square(ys1mu[m_i] - ys1vHat[m_i][g1]), x=iplT)
se[m_i] = np.sqrt(np.mean(se0[m_i])/len(grp0) + np.mean(se1[m_i])/len(grp1))
sem = 0.
tval = np.divide(l2, se + sem)
ys0vFlat = ys0v.reshape((ys0v.shape[0], -1))
ys0etaFlat = ys0eta.reshape((ys0eta.shape[0], -1))
ys0etaFlat = np.delete(ys0etaFlat, 13, 1) # ind 5 has an outlier
ys1vFlat = ys1v.reshape((ys1v.shape[0], -1))
ys1etaFlat = ys1eta.reshape((ys1eta.shape[0], -1))
t2 = time.time()
print(t2 - t1)
# Bootstrapping ===============================================================
fulldataS = []
for bcount in range(n_bootstrap):
print("Bootstrap run: " + str(bcount))
fulldataC = copy.deepcopy(fulldataRaw)
for m_i in range(len(sel)):
if USEMEAN:
for Di in range(N):
ysmuMean = (ys0mu[m_i][iplTIdx] + ys1mu[m_i][iplTIdx])/2
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
else:
ct_rand = np.random.rand()
for Di in range(N):
if ct_rand < 0.5:
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
else:
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys1mu[m_i][iplTIdx]
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys1mu[m_i][iplTIdx]
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
fulldataC = fulldataC.drop(fulldataC.index[16]) # ind 5 has an outlier
fulldataS.append(fulldataC)
fd_ri = pandas2ri.py2ri(fulldataC)
fd_rigrp0 = fd_ri.rx(fd_ri.rx2("grp").ro == 0, True)
fd_rigrp1 = fd_ri.rx(fd_ri.rx2("grp").ro == 1, True)
for m_i in range(len(sel)):
fd_rigrp0objbs = fd_rigrp0.rx2(sel[m_i])
fd_rigrp1objbs = fd_rigrp1.rx2(sel[m_i])
fit0 = smePack.sme(fd_rigrp0objbs,
fd_rigrp0tme,
fd_rigrp0ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mc,
initial_lambda_v=init_l_vc,
normalizeTime=normalizeTime)
fit1 = smePack.sme(fd_rigrp1objbs,
fd_rigrp1tme,
fd_rigrp1ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mt,
initial_lambda_v=init_l_vt,
normalizeTime=normalizeTime)
fit0coefbs = np.array(fit0.rx2('coefficients'))
fit1coefbs = np.array(fit1.rx2('coefficients'))
spl0mubs = interpolate.CubicSpline(t, fit0coefbs[0], bc_type='natural')
ys0mubs[bcount][m_i] = spl0mubs(iplT)
spl1mubs = interpolate.CubicSpline(t, fit1coefbs[0], bc_type='natural')
ys1mubs[bcount][m_i] = spl1mubs(iplT)
l2bs[bcount][m_i] = np.sqrt(np.trapz(np.square(ys0mubs[bcount][m_i] - ys1mubs[bcount][m_i]), x=iplT))
for g0 in range(len(grp0)):
spl0bs = interpolate.CubicSpline(t, fit0coefbs[g0 + 1] + fit0coefbs[0], bc_type='natural')
ys0vHatbs[bcount][m_i][g0] = spl0bs(iplT)
se0bs[bcount][m_i][g0] = np.trapz(np.square(ys0mubs[bcount][m_i] - ys0vHatbs[bcount][m_i][g0]), x=iplT)
for g1 in range(len(grp1)):
spl1bs = interpolate.CubicSpline(t, fit1coefbs[g1 + 1] + fit1coefbs[0], bc_type='natural')
ys1vHatbs[bcount][m_i][g1] = spl1bs(iplT)
se1bs[bcount][m_i][g1] = np.trapz(np.square(ys1mubs[bcount][m_i] - ys1vHatbs[bcount][m_i][g1]), x=iplT)
sebs[bcount][m_i] = np.sqrt(np.mean(se0bs[bcount][m_i])/len(grp0) + np.mean(se1bs[bcount][m_i])/len(grp1))
sembs = 0.
tvalbs[bcount] = np.divide(l2bs[bcount], sebs[bcount] + sembs)
t3 = time.time()
print(t3 - t2)
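# Approximate p-values by bootstrap: the fraction of bootstrap test statistics
# at least as large as the observed one.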
for m_i in range(len(sel)):
pval[m_i] = (tvalbs[:,m_i] >= tval[m_i]).sum()/n_bootstrap
pvalCorr = smm.multipletests(pval, alpha=0.05, method='fdr_bh')[1]
print('p-value: ' + str(len(np.where(pval <= 0.05)[0])))
print(np.where(pval <= 0.05)[0])
# Plotting ====================================================================
cmap1 = cm.get_cmap(colorMap, 2)
cmap2 = cm.get_cmap(colorMap, N)
cmap3 = cm.get_cmap(colorMap, len(sel))
cmap_grp0 = cm.get_cmap('viridis', len(grp0))
cmap_grp1 = cm.get_cmap('viridis', len(grp1))
def plotC(idx):
"""
Plots data points, individual, and mean curve of control group
:param idx: index of the selection
"""
fdgrp0tme_arr = np.array(fulldata[fulldata.grp == 0]["tme"])
fdgrp0sel_arr = np.array(fulldata[fulldata.grp == 0][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g0 in range(len(grp0)):
            tmeIdx = np.where(grp0_f == grp0[g0])
import os
import pycqed as pq
import unittest
import numpy as np
from scipy.spatial import ConvexHull
import adaptive
import pycqed.analysis.analysis_toolbox as a_tools
from pycqed.measurement import measurement_control
from pycqed.measurement.sweep_functions import (
None_Sweep,
None_Sweep_idx,
None_Sweep_With_Parameter_Returned,
)
import pycqed.measurement.detector_functions as det
from pycqed.instrument_drivers.physical_instruments.dummy_instruments import (
DummyParHolder,
)
from pycqed.measurement.optimization import nelder_mead, SPSA
from pycqed.utilities.learner1D_minimizer import (Learner1D_Minimizer,
mk_minimization_loss_func, mk_minimization_goal_func)
from pycqed.analysis import measurement_analysis as ma
from pycqed.utilities.get_default_datadir import get_default_datadir
from pycqed.measurement.hdf5_data import read_dict_from_hdf5
from qcodes.instrument.parameter import ManualParameter
from qcodes import station
class Test_MeasurementControl(unittest.TestCase):
@classmethod
def setUpClass(self):
self.station = station.Station()
self.MC = measurement_control.MeasurementControl(
"MC", live_plot_enabled=True, verbose=True
)
self.MC.station = self.station
self.station.add_component(self.MC)
self.mock_parabola = DummyParHolder("mock_parabola")
self.station.add_component(self.mock_parabola)
def setUp(self):
self.MC.soft_avg(1)
def test_soft_sweep_1D(self):
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("1D_soft")
dset = dat["dset"]
x = dset[:, 0]
xr = np.arange(len(x)) / 15
y = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0, :])
np.testing.assert_array_almost_equal(y1, y[1, :])
# Test that the return dictionary has the right entries
dat_keys = set(
[
"dset",
"opt_res",
"opt_res_dset",
"sweep_parameter_names",
"sweep_parameter_units",
"value_names",
"value_units",
]
)
self.assertEqual(dat_keys, set(dat.keys()))
self.assertEqual(dat["sweep_parameter_names"], ["pts"])
self.assertEqual(dat["sweep_parameter_units"], ["arb. unit"])
self.assertEqual(dat["value_names"], ["I", "Q"])
self.assertEqual(dat["value_units"], ["V", "V"])
def test_soft_sweep_1D_alt_shape(self):
        # This is a generalization of a 1D sweep function where instead of
        # a shape (2,) it has a shape (2, 1). This is inconsistent with the
        # N-D hard sweeps and should be addressed.
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft_diff_shape())
dat = self.MC.run("1D_soft")
dset = dat["dset"]
x = dset[:, 0]
xr = np.arange(len(x)) / 15
y = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0, :])
np.testing.assert_array_almost_equal(y1, y[1, :])
# Test that the return dictionary has the right entries
dat_keys = set(
[
"dset",
"opt_res",
"opt_res_dset",
"sweep_parameter_names",
"sweep_parameter_units",
"value_names",
"value_units",
]
)
self.assertEqual(dat_keys, set(dat.keys()))
self.assertEqual(dat["sweep_parameter_names"], ["pts"])
self.assertEqual(dat["sweep_parameter_units"], ["arb. unit"])
self.assertEqual(dat["value_names"], ["I", "Q"])
self.assertEqual(dat["value_units"], ["V", "V"])
@unittest.skipIf(True, "This test is currently broken")
def test_data_location(self):
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
self.MC.run("datadir_test_file")
# raises an error if the file is not found
ma.MeasurementAnalysis(label="datadir_test_file")
# change the datadir
test_dir2 = os.path.abspath(
os.path.join(os.path.dirname(pq.__file__), os.pardir, "data_test_2")
)
self.MC.datadir(test_dir2)
sweep_pts = np.linspace(0, 10, 30)
self.MC.set_sweep_function(None_Sweep())
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
self.MC.run("datadir_test_file_2")
# raises an error if the file is not found
with self.assertRaises(Exception):
ma.MeasurementAnalysis(label="datadir_test_file_2")
ma.a_tools.datadir = test_dir2
# changing the dir makes it find the file now
ma.MeasurementAnalysis(label="datadir_test_file_2")
self.MC.datadir(get_default_datadir())
def test_hard_sweep_1D(self):
sweep_pts = np.linspace(0, 10, 5)
self.MC.set_sweep_function(None_Sweep(sweep_control="hard"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_detector_function(det.Dummy_Detector_Hard())
dat = self.MC.run("1D_hard")
dset = dat["dset"]
x = dset[:, 0]
y = [np.sin(x / np.pi), np.cos(x / np.pi)]
y0 = dset[:, 1]
y1 = dset[:, 2]
np.testing.assert_array_almost_equal(x, sweep_pts)
np.testing.assert_array_almost_equal(y0, y[0])
np.testing.assert_array_almost_equal(y1, y[1])
d = self.MC.detector_function
self.assertEqual(d.times_called, 1)
def test_soft_sweep_2D(self):
sweep_pts = np.linspace(0, 10, 30)
sweep_pts_2D = np.linspace(0, 10, 5)
self.MC.set_sweep_function(None_Sweep(sweep_control="soft"))
self.MC.set_sweep_function_2D(None_Sweep(sweep_control="soft"))
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("2D_soft", mode="2D")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
xr = np.arange(len(sweep_pts) * len(sweep_pts_2D)) / 15
z = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
z0 = dset[:, 2]
z1 = dset[:, 3]
x_tiled = np.tile(sweep_pts, len(sweep_pts_2D))
y_rep = np.repeat(sweep_pts_2D, len(sweep_pts))
np.testing.assert_array_almost_equal(x, x_tiled)
np.testing.assert_array_almost_equal(y, y_rep)
np.testing.assert_array_almost_equal(z0, z[0, :])
np.testing.assert_array_almost_equal(z1, z[1, :])
def test_soft_sweep_2D_with_reading_of_set_parameter(self):
sweep_pts = np.linspace(0, 10, 30)
sweep_pts_2D = np.linspace(0, 10, 5)
self.MC.set_sweep_function(
None_Sweep_With_Parameter_Returned(sweep_control="soft")
)
self.MC.set_sweep_function_2D(
None_Sweep_With_Parameter_Returned(sweep_control="soft")
)
self.MC.set_sweep_points(sweep_pts)
self.MC.set_sweep_points_2D(sweep_pts_2D)
self.MC.set_detector_function(det.Dummy_Detector_Soft())
dat = self.MC.run("2D_soft", mode="2D")
dset = dat["dset"]
x = dset[:, 0]
y = dset[:, 1]
xr = np.arange(len(sweep_pts) * len(sweep_pts_2D)) / 15
z = np.array([np.sin(xr / np.pi), np.cos(xr / np.pi)])
z0 = dset[:, 2]
z1 = dset[:, 3]
# The +0.1 is to test if the return value is matching
x_tiled = np.tile(sweep_pts + 0.1, len(sweep_pts_2D))
y_rep = np.repeat(sweep_pts_2D + 0.1, len(sweep_pts))
np.testing.assert_array_almost_equal(x, x_tiled)
np.testing.assert_array_almost_equal(y, y_rep)
        np.testing.assert_array_almost_equal(z0, z[0, :])
        np.testing.assert_array_almost_equal(z1, z[1, :])
import numpy as np
from mesonh_atm.mesonh_atmosphere import MesoNHAtmosphere
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
import modules.cloud as ModCloud
#Old Data without advection
path = "/net/skyscanner/volume1/data/mesoNH/ARM_OneHour3600files_No_Horizontal_Wind/"
mfiles = [path+"U0K10.1.min{:02d}.{:03d}_diaKCL.nc".format(minute, second)
for minute in range(1, 60)
for second in range(1, 61)]
mtstep = 1
atm = MesoNHAtmosphere(mfiles, 1)
font = {'size' : 26}
plt.rc('font', **font)
#######################################################################
########################### cloud example #############################
#######################################################################
# Example Data of two variables with the coordinates of a rough bounding box of a cloud
# RCT = liquid water content, WT = vertical wind
lwc_data=atm.data['RCT'][449:599,75:125,60:200,110:250]
zwind_data=atm.data['WT'][449:599,75:125,60:200,110:250]
ids,counter,clouds=ModCloud.cloud_segmentation(lwc_data)
clouds=list(set(clouds.values()))
length_point_clds = np.ndarray((0,1))
for each_cloud in clouds:
print(len(each_cloud.points))
temp = len(each_cloud.points)
length_point_clds = np.vstack((length_point_clds,temp))
# Get cloud with the biggest amount of points in the bounding box
cloud = clouds[np.argmax(length_point_clds)]
cloud.calculate_attributes(lwc_data,zwind_data)
lwc_cloud = np.zeros(lwc_data.shape)
for point in cloud.points:
lwc_cloud[point] = 1
#Coordinates of the rough bounding box of the example cloud
xr = np.arange(0.005 + 60*0.01, 0.005 + 200*0.01,0.01)
yr = np.arange(0.005 + 110*0.01, 0.005 + 250*0.01,0.01)
all_Zs = atm.data["VLEV"][:,0,0]
zr = all_Zs[75:125]
tr = np.arange(449,599)
origin_xy = [60,110]
zspan = np.arange(0,16)
# Plotting three different cross-sections including the center of geometry COG and the center of masses
# of the vertical wind and liquid water content
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,15].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,15].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
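# Continuation sketch (the source was truncated here), mirroring the COG marker
# above; the marker style and label are assumptions, not the original code:
plt.plot(COM_2D_zwind[0], COM_2D_zwind[1], 'gx', markersize=8, label='COM 2D zwind')
plt.legend()
plt.show()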
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
pd.options.mode.chained_assignment = None
class NominalACM(object):
"""
    This class estimates the term premium based on the paper 'Pricing the Term Structure with Linear Regressions'.
    It takes the curve vertices and the excess returns of positions on each of them, and exposes the term premium
    and risk-neutral yields as object attributes.
"""
base_count_dict = {'daily': 252,
'monthly': 12,
'yearly': 1}
def __init__(self, curve, excess_returns, freq='daily', interpolation='pchip', n_factors=5, compute_miy=False):
"""
All inputs are saved as attributes.
:param curve: DataFrame with equally spaced vertices as columns
:param excess_returns: DataFrame with the excess returns of the vertices as columns
:param freq: 'daily', 'monthly' or 'yearly'. Frequency of observations.
:param interpolation: any interpolation method from the SciPy library.
:param n_factors: number of principal components in the analysis.
:param compute_miy: Boolean. If True, computes the model implied yield.
"""
self.curve = curve
self.excess_returns = excess_returns
self.curve_exp = np.log(1+curve)
self.freq = freq
self.interpolation_method = interpolation
self.n_factors = n_factors
self.n_tenors = excess_returns.shape[1]
self.tenors = excess_returns.columns
self.sample_size = curve.shape[0] - 1
self.base_count = self.base_count_dict[freq]
self.compute_miy = compute_miy
self._run_estimation()
def _run_estimation(self):
# Step 0 - get the PCA factor series
self.PCA_factors = self._get_pca_factors()
# Step 1 - VAR for the PCA equities
Mu_hat, Phi_hat, V_hat, Sigma_hat = self._estimate_factor_var()
# Step 2 - Excess return equation
beta_hat, a_hat, B_star_hat, sigma2_hat, c_hat = self._estimate_excess_return_equation(v_hat=V_hat)
# Step 3 - Estimate price of risk parameters
lambda_0_hat, lambda_1_hat = self._retrieve_lambda(beta_hat, a_hat, B_star_hat, Sigma_hat, sigma2_hat, c_hat)
# Step 4 - Equation for the Short Rate
delta_0_hat, delta_1_hat = self._estimate_short_rate_equation()
# Step 5 - Affine Recursions
# model implied yield
if self.compute_miy:
miy = self._affine_recursions(Mu_hat, Phi_hat, Sigma_hat, sigma2_hat, lambda_0_hat, lambda_1_hat,
delta_0_hat, delta_1_hat)
miy = pd.DataFrame(data=miy[:, 1:],
index=self.PCA_factors[1:].index,
columns=list(range(1, self.tenors.max() + 1)))
self.miy = np.exp(miy) - 1
else:
self.miy = None
# risk neutral yield
rny = self._affine_recursions(Mu_hat, Phi_hat, Sigma_hat, sigma2_hat, 0, 0, delta_0_hat, delta_1_hat)
rny = pd.DataFrame(data=rny[:, 1:],
index=self.PCA_factors[1:].index,
columns=list(range(1, self.tenors.max() + 1)))
self.rny = np.exp(rny) - 1
self.term_premium = ((1 + self.curve) / (1 + self.rny) - 1).dropna(how='all')
def _get_pca_factors(self):
pca = PCA(n_components=self.n_factors)
df_pca = pd.DataFrame(data=pca.fit_transform(self.curve_exp.values),
index=self.curve.index,
columns=['PC' + str(i) for i in range(1, self.n_factors + 1)])
return df_pca
def _estimate_factor_var(self):
Y = self.PCA_factors.iloc[1:]
Z = self.PCA_factors.iloc[:-1]
Z['const'] = 1
Z = Z[['const'] + ['PC' + str(x) for x in range(1, self.n_factors + 1)]].T
# The VAR(1) estimator is given by equation (3.2.10) from Lutkepohl's book.
mat_Z = np.matrix(Z)
mat_Y = np.matrix(Y).T
B_hat = np.dot(mat_Y, np.dot(mat_Z.T, np.linalg.inv(np.dot(mat_Z, mat_Z.T))))
# Computes matrices Mu and Phi of the VAR(1) of the paper.
Mu_hat = B_hat[:, 0]
Phi_hat = B_hat[:, 1:self.n_factors + 1]
# residuals matrix V_hat and the unbiased estimate of its covariance
V_hat = mat_Y - np.dot(B_hat, mat_Z)
Sigma_hat = np.dot((1 / (self.sample_size - self.n_factors - 1)), np.dot(V_hat, V_hat.T))
return Mu_hat, Phi_hat, V_hat, Sigma_hat
def _estimate_excess_return_equation(self, v_hat):
mat_rx = self.excess_returns.iloc[1:].values.T.astype(float)
Z = np.concatenate((np.ones((1, self.sample_size)), v_hat, np.matrix(self.PCA_factors.iloc[:-1]).T))
D_hat = np.dot(mat_rx, np.dot(Z.T, np.linalg.inv(np.dot(Z, Z.T))))
a_hat = D_hat[:, 0]
beta_hat = D_hat[:, 1:self.n_factors + 1].T
c_hat = D_hat[:, self.n_factors + 1:]
E_hat = mat_rx - np.dot(D_hat, Z)
sigma2_hat = np.trace(np.dot(E_hat, E_hat.T)) / (self.n_tenors * self.sample_size)
# Builds the estimate of the B* matrix, defined in equation (13) of the paper
B_star_hat = np.zeros((self.n_tenors, self.n_factors ** 2))
for i in range(0, self.n_tenors):
B_star_hat[i, :] = np.reshape(np.dot(beta_hat[:, i], beta_hat[:, i].T), (1, self.n_factors ** 2))
return beta_hat, a_hat, B_star_hat, sigma2_hat, c_hat
def _retrieve_lambda(self, beta_hat, a_hat, b_star_hat, Sigma_hat, sigma2_hat, c_hat):
lambda_0_hat = np.dot(np.linalg.inv(np.dot(beta_hat, beta_hat.T)),
np.dot(beta_hat,
a_hat + np.dot(0.5,
np.dot(b_star_hat,
np.reshape(Sigma_hat, (self.n_factors ** 2, 1)))
+ np.dot(sigma2_hat,
np.ones((self.n_tenors, 1))
)
)
)
)
lambda_1_hat = np.dot(np.dot(np.linalg.inv(np.dot(beta_hat, beta_hat.T)), beta_hat), c_hat)
return lambda_0_hat, lambda_1_hat
def _estimate_short_rate_equation(self):
X_star = self.PCA_factors
X_star['const'] = 1
X_star = X_star[['const'] + ['PC' + str(x) for x in range(1, self.n_factors + 1)]].values
r1 = np.dot(1/self.base_count, self.curve_exp.iloc[:, 0].values.T)
Delta_hat = np.dot(np.dot(np.linalg.inv(np.dot(X_star.T, X_star)), X_star.T), r1)
delta_0_hat = Delta_hat[0]
delta_1_hat = Delta_hat[1:self.n_factors + 1]
return delta_0_hat, delta_1_hat
def _affine_recursions(self, Mu_hat, Phi_hat, Sigma_hat, sigma2_hat, lambda_0_hat, lambda_1_hat, delta_0_hat,
delta_1_hat):
X_star = self.PCA_factors
X_star['const'] = 1
X_star = X_star[['const'] + ['PC' + str(x) for x in range(1, self.n_factors + 1)]].values
N_rec = self.tenors.max()
Bn = np.matrix(np.zeros((self.n_factors, N_rec + 1)))
Bn[:, 1] = -delta_1_hat.reshape((self.n_factors, 1))
for i in range(2, N_rec + 1):
Bn[:, i] = np.transpose(np.dot(Bn[:, i - 1].T, Phi_hat - lambda_1_hat) - delta_1_hat.T)
An = np.matrix(np.zeros((1, N_rec + 1)))
An[:, 1] = -delta_0_hat
for i in range(2, N_rec + 1):
An[:, i] = An[:, i - 1] + np.dot(np.transpose(Bn[:, i - 1]), Mu_hat - lambda_0_hat) + 0.5 * \
                   (np.dot(np.dot(np.transpose(Bn[:, i - 1]), Sigma_hat), Bn[:, i - 1]) + sigma2_hat) - delta_0_hat
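        # Completion sketch: the source was truncated here. Under the standard
        # ACM affine mapping (log price = An + Bn'X, so the n-period log yield
        # is -(An + Bn'X)/n, annualized with base_count), the fitted yields for
        # every observation after the first are rebuilt below; the exact
        # original continuation may differ.
        factors = np.matrix(X_star[1:, 1:])  # drop the first obs to match the VAR sample
        fitted = np.zeros((factors.shape[0], N_rec + 1))
        for n in range(1, N_rec + 1):
            fitted[:, n] = -(An[0, n] + np.squeeze(np.asarray(factors * Bn[:, n]))) * self.base_count / n
        return fitted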
import pandas as pd
import sys
import numpy as np
import scipy as sp
import json
import os
from decimal import Decimal
import scipy.optimize as opt
from scipy.optimize import minimize, curve_fit
from scipy.special import erfc
from scipy.stats import crystalball
from scipy.signal import medfilt, find_peaks
import pygama.analysis.histograms as pgh
import pygama.utils as pgu
import pygama.analysis.peak_fitting as pga
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
plt.style.use('style.mplstyle')
def main():
## this code takes the peaks from thorium's first-pass calibration and fits them. the values from these fits are used to then do a non-linear, second-pass calibration.
peak_2615()
#peak_1765()
#peak_1460()
#peak_609()
#peak_352()
def peak_2615():
if(len(sys.argv) != 2):
print('Usage: fit_bkg_peaks.py [run number]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
meta_dir = os.path.expandvars(runDB["meta_dir"])
#df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")
def gauss(x, mu, sigma, A=1):
"""
define a gaussian distribution, w/ args: mu, sigma, area (optional).
"""
return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))
def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
"""
David Radford's HPGe peak shape function
"""
# make sure the fractional amplitude parameters stay reasonable...
if htail < 0 or htail > 1:
return np.zeros_like(x)
if hstep < 0 or hstep > 1:
return np.zeros_like(x)
bg_term = bg0 #+ x*bg1
if np.any(bg_term < 0):
return np.zeros_like(x)
# compute the step and the low energy tail
step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
le_tail = a * htail
le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
le_tail *= np.exp((x - mu) / tau)
le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))
# add up all the peak shape components
return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail
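    # Shape sketch (illustrative parameters, not fit results):
    #   xs = np.arange(2540, 2680, 0.5)
    #   ys = radford_peak(xs, mu=2614.5, sigma=1.0, hstep=0.01, htail=0.1, tau=5, bg0=1, a=4000)
    # gives a Gaussian peak sitting on a step plus a low-energy exponential tail.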
hist, bins, var = pgh.get_hist(df['e_cal'], range=(2540,2680), dx=0.5)
pgh.plot_hist(hist, bins, var=hist, label="data")
pars, cov = pga.fit_hist(radford_peak, hist, bins, var=hist, guess=[2608.5, 1.05, 0.001, 0.02, 5, 1, 4000])
pgu.print_fit_results(pars, cov, radford_peak)
pgu.plot_func(radford_peak, pars, label="chi2 fit", color='red')
#x_vals = np.arange(2540,2680,0.5)
#plt.plot(x_vals, radford_peak(x_vals, 2608.5, 1.05, .001, 0.02, 5, 1, 4000))
FWHM = '%.2f' % Decimal(pars[1]*2)
FWHM_uncertainty = '%.2f' % Decimal(np.sqrt(cov[1][1])*2)
peak = '%.2f' % Decimal(pars[0])
peak_uncertainty = '%.2f' % Decimal(np.sqrt(cov[0][0]))
residual = '%.2f' % (2614.51 - float(peak))
chi_2_element_list = []
for i in range(len(hist)):
chi_2_element = abs((radford_peak(bins[i], *pars) - hist[i])**2/radford_peak(bins[i], *pars))
chi_2_element_list.append(chi_2_element)
chi_2 = sum(chi_2_element_list)
reduced_chi_2 = '%.2f' % Decimal(chi_2/len(hist))
print(reduced_chi_2)
label_01 = '2614.51 keV peak fit'
label_02 = 'FWHM = '+str(FWHM)+r' $\pm$ '+str(FWHM_uncertainty)
label_03 = 'Peak = '+str(peak)+r' $\pm$ '+str(peak_uncertainty)
label_04 = 'Residual = '+str(residual)+r' $\pm$ '+str(peak_uncertainty)
colors = ['red', 'red','red', 'red']
lines = [Line2D([0], [0], color=c, lw=2) for c in colors]
labels = [label_01, label_02, label_03, label_04]
plt.xlim(2540,2680)
plt.ylim(0,plt.ylim()[1])
plt.xlabel('Energy (keV)', ha='right', x=1.0)
plt.ylabel('Counts', ha='right', y=1.0)
plt.title('Fit of First-Pass Kr83m Calibration Peak')
plt.tight_layout()
#plt.semilogy()
plt.legend(lines, labels, frameon=False, loc='upper right', fontsize='small')
plt.show()
def peak_1765():
if(len(sys.argv) != 2):
print('Usage: fit_bkg_peaks.py [run number]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
meta_dir = os.path.expandvars(runDB["meta_dir"])
#df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")
def gauss(x, mu, sigma, A=1):
"""
define a gaussian distribution, w/ args: mu, sigma, area (optional).
"""
return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))
def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
"""
        David Radford's HPGe peak shape function
"""
# make sure the fractional amplitude parameters stay reasonable...
if htail < 0 or htail > 1:
return np.zeros_like(x)
if hstep < 0 or hstep > 1:
return np.zeros_like(x)
bg_term = bg0 #+ x*bg1
if np.any(bg_term < 0):
return np.zeros_like(x)
# compute the step and the low energy tail
step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
le_tail = a * htail
le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
le_tail *= np.exp((x - mu) / tau)
le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))
# add up all the peak shape components
return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail
hist, bins, var = pgh.get_hist(df['e_cal'], range=(1740,1780), dx=0.5)
pgh.plot_hist(hist, bins, var=hist, label="data")
pars, cov = pga.fit_hist(radford_peak, hist, bins, var=hist, guess=[1761, 1.85, 0.001, 0.02, 5, 1, 4000])
pgu.print_fit_results(pars, cov, radford_peak)
pgu.plot_func(radford_peak, pars, label="chi2 fit", color='red')
#x_vals = np.arange(1740,1780,0.5)
#plt.plot(x_vals, radford_peak(x_vals, 1761, 1.85, .001, 0.02, 5, 1, 4000))
FWHM = '%.2f' % Decimal(pars[1]*2)
FWHM_uncertainty = '%.2f' % Decimal(np.sqrt(cov[1][1])*2)
peak = '%.2f' % Decimal(pars[0])
peak_uncertainty = '%.2f' % Decimal(np.sqrt(cov[0][0]))
residual = '%.2f' % (1764.49 - float(peak))
#chi_2_element_list = []
#for i in range(len(hist)):
#chi_2_element = abs((radford_peak(bins[i], *pars) - hist[i])**2/radford_peak(bins[i], *pars))
#chi_2_element_list.append(chi_2_element)
#chi_2 = sum(chi_2_element_list)
#reduced_chi_2 = '%.2f' % Decimal(chi_2/len(hist))
label_01 = '1764.49 keV peak fit'
label_02 = 'FWHM = '+str(FWHM)+r' $\pm$ '+str(FWHM_uncertainty)
label_03 = 'Peak = '+str(peak)+r' $\pm$ '+str(peak_uncertainty)
label_04 = 'Residual = '+str(residual)+r' $\pm$ '+str(peak_uncertainty)
colors = ['red', 'red','red', 'red']
lines = [Line2D([0], [0], color=c, lw=2) for c in colors]
labels = [label_01, label_02, label_03, label_04]
plt.xlim(1740,1780)
plt.ylim(0,plt.ylim()[1])
plt.xlabel('Energy (keV)', ha='right', x=1.0)
plt.ylabel('Counts', ha='right', y=1.0)
plt.tight_layout()
#plt.semilogy()
plt.legend(lines, labels, frameon=False, loc='upper right', fontsize='small')
plt.show()
def peak_1460():
if(len(sys.argv) != 2):
print('Usage: fit_bkg_peaks.py [run number]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
meta_dir = os.path.expandvars(runDB["meta_dir"])
tier_dir = os.path.expandvars(runDB["tier_dir"])
#df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")
#df = pd.read_hdf("{}/t2_run{}.h5".format(tier_dir,sys.argv[1]))
#df['e_cal'] = 0.4054761904761905 * df['e_ftp'] + 3.113095238095184
def gauss(x, mu, sigma, A=1):
"""
define a gaussian distribution, w/ args: mu, sigma, area (optional).
"""
return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))
def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
"""
        David Radford's HPGe peak shape function
"""
# make sure the fractional amplitude parameters stay reasonable...
if htail < 0 or htail > 1:
return np.zeros_like(x)
if hstep < 0 or hstep > 1:
return np.zeros_like(x)
bg_term = bg0 #+ x*bg1
        if np.any(bg_term < 0):
            return np.zeros_like(x)
        # compute the step and the low energy tail
        step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
        le_tail = a * htail
        le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
        le_tail *= np.exp((x - mu) / tau)
        le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))
        # add up all the peak shape components
        return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail
import numpy as np
from keras.layers import Input, Bidirectional, LSTM, Embedding, Dense, Dropout
from keras.models import Model
from keras.utils import to_categorical
from sklearn.metrics import f1_score
from model.callbacks import F1score
from .data_utils import minibatches, pad_sequences
class BaseKerasModel(object):
"""Generic class for general methods that are not specific to NER"""
def __init__(self, config):
"""Defines self.config and self.logger
Args:
config: (Config instance) class with hyper parameters,
vocab and embeddings
"""
self.config = config
self.logger = config.logger
self.model = None
self.sess = None
self.saver = None
def batch_iter(self, train, batch_size, return_lengths=False):
"""
Creates a batch generator for the dataset
:param train: Dataset
:param batch_size: Batch Size
:param return_lengths: If True, generator returns sequence lengths. Used masking data during the evaluation step
:return: (number of batches in dataset, data generator)
"""
nbatches = (len(train) + batch_size - 1) // batch_size
def data_generator():
while True:
for i, (words, labels) in enumerate(minibatches(train, batch_size)):
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = zip(*words)
word_ids, sequence_lengths = pad_sequences(word_ids, 0)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,
nlevels=2)
else:
                        word_ids, sequence_lengths = pad_sequences(words, 0)
if labels:
labels, _ = pad_sequences(labels, 0)
labels = [to_categorical(label, num_classes=self.config.ntags) for label in labels] # Change labels to one-hot
# build dictionary
inputs = {
"word_ids": np.asarray(word_ids),
}
if self.config.use_chars:
inputs["char_ids"] = np.asarray(char_ids)
if return_lengths:
yield(inputs, np.asarray(labels), sequence_lengths)
else:
yield (inputs, np.asarray(labels))
return (nbatches, data_generator())
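    # Usage sketch (illustrative): the generator loops forever, so pair it with
    # the reported batch count when driving Keras, e.g.
    #   nbatches, gen = self.batch_iter(train, batch_size)
    #   self.model.fit_generator(gen, steps_per_epoch=nbatches, epochs=5)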
def train(self, train, dev, show_history=False):
batch_size = self.config.batch_size
nbatches_train, train_generator = self.batch_iter(train, batch_size)
nbatches_dev, dev_generator = self.batch_iter(dev, batch_size)
_, f1_generator = self.batch_iter(dev, batch_size, return_lengths=True)
f1 = F1score(f1_generator, nbatches_dev, self.run_evaluate)
callbacks = self.gen_callbacks([f1])
history = self.model.fit_generator(generator=train_generator,
steps_per_epoch=nbatches_train,
validation_data=dev_generator,
validation_steps=nbatches_dev,
epochs=5,
callbacks=callbacks) #, nbatches_train
if show_history:
print(history.history['f1'])
pass
def predict_words(self, words_raw):
words = [self.config.processing_word(w) for w in words_raw]
if type(words[0]) == tuple:
words = zip(*words)
char_ids, word_ids = words
word_ids = np.asarray(word_ids)
s = word_ids.shape
word_ids = word_ids.reshape(-1, s[0])
inputs = [word_ids]
if self.config.use_chars:
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,
nlevels=1)
char_ids = np.asarray(char_ids)
s = char_ids.shape
char_ids = char_ids.reshape(-1, s[0], s[1])
inputs.append(char_ids)
#print(word_ids)
#print(char_ids)
one_hot_preds = self.model.predict_on_batch(inputs)
#print("One hot preds: ", one_hot_preds)
one_hot_preds = [a.flatten() for a in one_hot_preds.squeeze()] #Squeeze to remove unnecessary 1st dimension for batch size
#print("One hot preds: ", one_hot_preds)
pred_ids = np.argmax(one_hot_preds, axis=1)
#print("Pred ids: ", pred_ids)
preds = [self.idx_to_tag[idx] for idx in pred_ids]
return preds
def run_evaluate(self, data_generator, steps_per_epoch):
accs = []
label_true = []
label_pred = []
for i in range(steps_per_epoch):
#try:
x_true, y_true, sequence_lengths = next(data_generator)
y_pred = self.model.predict_on_batch(x_true)
for lab, lab_pred, length in zip(y_true, y_pred,
sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
lab = np.argmax(lab, axis=1)
lab_pred = np.argmax(lab_pred, axis=1)
accs += [a==b for (a, b) in zip(lab, lab_pred)]
label_true.extend(lab)
label_pred.extend(lab_pred)
label_true = np.asarray(label_true)
#print("Truths: ", label_true)
label_pred = np.asarray(label_pred)
#print("Preds: ", label_pred)
        acc = np.mean(accs)
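        # Completion sketch (the source was truncated here): macro-averaged F1
        # over the flattened label sequences, using the sklearn import above;
        # the exact return format of the original is an assumption.
        f1 = f1_score(label_true, label_pred, average='macro')
        return {"acc": 100 * acc, "f1": 100 * f1}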
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
"""Core integrated routines for detecting and characterizing crosstalk"""
import numpy as _np
from . import objects as _obj
from ... import objects as _pygobjs
from ... import io as _pygio
import pcalg
from gsq.ci_tests import ci_test_dis
import collections
from sympy import isprime
def tuple_replace_at_index(tup, ix, val):
return tup[:ix] + (val,) + tup[ix + 1:]
def load_pygsti_dataset(filename):
"""
Loads a pygsti dataset from file.
This is a wrapper that just checks the first line, and replaces it with the newer outcome specification
    format if it's the old type.
"""
try:
# file = open(filename, "r")
open(filename, "r")
except IOError:
print("File not found, or other file IO error.")
# lines = file.readlines()
# file.close()
# if lines[0] == "## Columns = 00 count, 01 count, 10 count, 11 count\n":
# lines[0] = "## Columns = 0:0 count, 0:1 count, 1:0 count, 1:1 count\n"
# file = open(filename, "w")
# file.writelines(lines)
# file.close()
data = _pygio.load_dataset(filename)
return data
def flatten(l):
"""
    Flattens an irregular list.
From https://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists
"""
for el in l:
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
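# Usage sketch for flatten() (illustrative data, not from the source); strings
# and bytes are kept whole rather than being split into characters:
#   list(flatten([1, [2, [3, 4]], 'ab']))  ->  [1, 2, 3, 4, 'ab']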
def form_ct_data_matrix(ds, number_of_regions, settings, filter_lengths=[]):
# This converts a DataSet to an array since the code below uses arrays
if type(ds) == _pygobjs.dataset.DataSet:
opstr = ds.keys()[0]
temp = ds.auxInfo[opstr]['settings']
num_settings = len(temp)
settings_shape = _np.shape(settings)
# Check that settings is a list of length number_of_regions
        assert (len(settings_shape) == 1) and (settings_shape[0] == number_of_regions), \
            "settings should be a list of the same length as number_of_regions."
dscopy = ds.copy_nonstatic()
# filter out lengths not in filter_lengths
if len(filter_lengths) > 0:
for k in dscopy.keys():
if len(k) not in filter_lengths:
dscopy.remove([k])
dscopy.done_adding_data()
# num columns = number of settings + number of regions (b/c we assume one outcome per region)
#num_columns = num_settings + number_of_regions
num_data = len(dscopy.keys())
data = []
collect_settings = {key: [] for key in range(num_settings)}
for row in range(num_data):
opstr = dscopy.keys()[row]
templine_set = [0] * num_settings
settings_row = dscopy.auxInfo[opstr]['settings']
for key in settings_row:
if len(key) == 1: # single region/qubit gate
templine_set[key[0]] = settings_row[key]
collect_settings[key[0]].append(settings_row[key])
else: # two-region/two-qubit gate
print("Two qubit gate, not sure what to do!!") # TODO
return
outcomes_row = dscopy[opstr]
for outcome in outcomes_row:
templine_out = [0] * number_of_regions
if len(outcome[0]) == 1:
# outcomes labeled by bitstrings
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][0][r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
else:
# outcomes labeled by tuples of bits
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
for r in range(num_rep):
data.append(flattened_line)
#num_seqs = [len(set(collect_settings[i])) for i in range(num_settings)]
data = _np.asarray(data)
# if the dataset is specified by a string assume its a filename with a saved numpy array
elif type(ds) == str:
data = _np.loadtxt(ds)
data = data.astype(int)
        data_shape = _np.shape(data)
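    # Continuation sketch (the source was truncated here): hand the assembled
    # matrix back to the caller; the exact original return values may differ.
    return data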
import matplotlib.pyplot as plt
import numpy as np
import skfuzzy as fuzz
import sys
from skfuzzy import control as ctrl
# Input Variables
hand_distance = ctrl.Antecedent(np.arange(0, 101, 1), 'hand_distance') # no negative distances => starting at 0
hand_to_shoulder_distance = ctrl.Antecedent(np.arange(-100, 101, 1), 'hand_to_shoulder_distance') # negative = hands are belows shoulders, positive = hands are above shoulders
hand_gradient = ctrl.Antecedent(np.arange(-100, 101, 1), 'hand_gradient') # negative = left hand lower, positive = right hand lower
# Output Variables
forward = ctrl.Consequent(np.arange(0, 101, 1), 'forward')
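# Continuation sketch (the source was truncated here): a plausible second output
# variable and auto-generated triangular memberships; the names below are
# assumptions rather than the original code.
turn = ctrl.Consequent(np.arange(-100, 101, 1), 'turn')
hand_distance.automf(3)
hand_to_shoulder_distance.automf(3)
hand_gradient.automf(3)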
import numpy as np
import cv2
from skimage.feature import peak_local_max
NOCS_CAMERA_MAT = np.array([[591.0125 , 0. , 322.525 , 0. ],
[ 0. , 590.16775, 244.11084, 0. ],
[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ]])
open_3d_lines = [
[0, 1],
[7,3],
[1, 3],
[2, 0],
[3, 2],
[0, 4],
[1, 5],
[2, 6],
# [4, 7],
[7, 6],
[6, 4],
[4, 5],
[5, 7],
]
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/utils/nocs_utils.py#L7
def load_depth(depth_path):
""" Load depth image from img_path. """
# depth_path = depth_path + '_depth.png'
# print("depth_path", depth_path)
depth = cv2.imread(depth_path, -1)
if len(depth.shape) == 3:
# This is encoded depth image, let's convert
# NOTE: RGB is actually BGR in opencv
depth16 = depth[:, :, 1]*256 + depth[:, :, 2]
depth16 = np.where(depth16==32001, 0, depth16)
depth16 = depth16.astype(np.uint16)
elif len(depth.shape) == 2 and depth.dtype == 'uint16':
depth16 = depth
else:
assert False, '[ Error ]: Unsupported depth type.'
return depth16
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/utils/nocs_utils.py#L24
def load_img_NOCS(rgm_img_path, depth_path):
left_img = cv2.imread(rgm_img_path)
depth = load_depth(depth_path)
depth_norm = np.array(depth, dtype=np.float32)/255.0
return left_img, depth_norm, depth
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/simnet/lib/net/post_processing/pose_outputs.py#L121
def find_nearest(peaks,value):
newList = np.linalg.norm(peaks-value, axis=1)
return peaks[np.argsort(newList)]
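# Note: despite its name, find_nearest returns *all* peaks sorted by Euclidean
# distance to `value` (nearest first); extract_peaks_from_centroid_sorted below
# relies on that ordering.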
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/simnet/lib/net/post_processing/pose_outputs.py#L126
def extract_peaks_from_centroid_sorted(centroid_heatmap,min_confidence=0.15, min_distance=10):
peaks = peak_local_max(centroid_heatmap, min_distance=min_distance, threshold_abs=min_confidence)
peaks = find_nearest(peaks,[0,0])
return peaks
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/simnet/lib/net/post_processing/pose_outputs.py#L131
def extract_latent_emb_from_peaks(heatmap_output, peaks, latent_emb_output, scale_factor=8):
assert peaks.shape[1] == 2
latent_embeddings = []
indices = []
scores = []
for ii in range(peaks.shape[0]):
index = np.zeros([2])
index[0] = int(peaks[ii, 0] / scale_factor)
index[1] = int(peaks[ii, 1] / scale_factor)
        index = index.astype(int)
latent_emb = latent_emb_output[index[0], index[1], :]
latent_embeddings.append(latent_emb)
indices.append(index*scale_factor)
scores.append(heatmap_output[peaks[ii, 0], peaks[ii, 1]])
return latent_embeddings, indices, scores
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/simnet/lib/net/post_processing/pose_outputs.py#L147
def extract_abs_pose_from_peaks(peaks, abs_pose_output, scale_factor=8):
assert peaks.shape[1] == 2
abs_poses = []
scales = []
for ii in range(peaks.shape[0]):
index = np.zeros([2])
index[0] = int(peaks[ii, 0] / scale_factor)
index[1] = int(peaks[ii, 1] / scale_factor)
        index = index.astype(int)
abs_pose_values = abs_pose_output[index[0], index[1],:]
rotation_matrix = np.array([[abs_pose_values[0], abs_pose_values[1], abs_pose_values[2]],
[abs_pose_values[3], abs_pose_values[4], abs_pose_values[5]],
[abs_pose_values[6], abs_pose_values[7], abs_pose_values[8]]])
translation_vector = np.array([abs_pose_values[9], abs_pose_values[10], abs_pose_values[11]])
transformation_mat = np.eye(4)
transformation_mat[:3,:3] = rotation_matrix
transformation_mat[:3,3] = translation_vector
scale = abs_pose_values[12]
scale_matrix = np.eye(4)
scale_mat = scale*np.eye(3, dtype=float)
scale_matrix[0:3, 0:3] = scale_mat
scales.append(scale_matrix)
abs_poses.append((transformation_mat, scale_matrix))
return abs_poses
# Ref: https://github.com/zubair-irshad/CenterSnap/blob/5422258475c30c37807566c60996f4d8b3a810e7/simnet/lib/net/post_processing/pose_outputs.py#L177
def draw_peaks(centroid_target, peaks):
    centroid_target = np.clip(centroid_target, 0.0, 1.0)
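    # Completion sketch (the source was truncated here): render the clipped
    # heatmap as an 8-bit image and mark each detected peak; the drawing details
    # (colors, radius) are assumptions, not the original implementation.
    vis = cv2.cvtColor((centroid_target * 255.0).astype(np.uint8), cv2.COLOR_GRAY2BGR)
    for peak in peaks:
        cv2.circle(vis, (int(peak[1]), int(peak[0])), 4, (0, 0, 255), -1)
    return vis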
import numpy as np
import time
import copy
from multiprocessing.pool import ThreadPool
from operator import itemgetter
from scipy.spatial.distance import cdist
from torch.nn.functional import normalize
from torch import Tensor
import torch
import torch.nn.functional as F
from lib.config import cfg
from sampler import Sampler
from lib.utils import log
class SubModSampler(Sampler):
def __init__(self, model, dataset, batch_size, ltl_log_ep=5):
super(SubModSampler, self).__init__(model, dataset)
self.batch_size = batch_size
self.index_set = range(0, len(self.dataset)) # It contains the indices of each image of the set.
self.ltl_log_ep = ltl_log_ep
f_acts = torch.tensor(self.final_activations)
p_log_p = F.softmax(f_acts, dim=1) * F.log_softmax(f_acts, dim=1)
H = -p_log_p.numpy()
self.H = np.sum(H,axis=1) # Compute entropy of all samples for an epoch.
dist = [1./len(self.dataset)]*len(self.dataset)
self.dist = np.array(dist)
def get_subset(self, detailed_logging=False):
set_size = len(self.index_set)
num_of_partitions = cfg.num_of_partitions
if set_size >= num_of_partitions*self.batch_size:
            size_of_each_part = set_size // num_of_partitions  # integer division so the slicing below works
r_size = (size_of_each_part*self.ltl_log_ep)/self.batch_size
partitions = [self.index_set[k:k+size_of_each_part] for k in range(0, set_size, size_of_each_part)]
pool = ThreadPool(processes=len(partitions))
pool_handlers = []
for partition in partitions:
handler = pool.apply_async(get_subset_indices, args=(partition, self.penultimate_activations, self.final_activations,
self.H, self.batch_size, r_size, self.dist))
pool_handlers.append(handler)
pool.close()
pool.join()
intermediate_indices = []
dist_updated = []
for (k,handler) in enumerate(pool_handlers):
res = handler.get()
intermediate_indices.extend(res[0])
dist_k = res[1]
dist_updated.extend(dist_k[np.array(partitions[k])])
            self.dist = np.array(dist_updated)
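        else:
            intermediate_indices = list(self.index_set)
        # Final selection sketch (the source was truncated here): one more greedy
        # pass over the merged candidates returns the batch; the call mirrors the
        # per-partition calls above and its r_size argument is an assumption.
        subset, _ = get_subset_indices(intermediate_indices, self.penultimate_activations,
                                       self.final_activations, self.H, self.batch_size,
                                       self.batch_size, self.dist)
        return subset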
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import tensorflow as tf
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import gpflow
from gpflow import settings
from gpflow.conditionals import uncertain_conditional
from gpflow.conditionals import feature_conditional
from gpflow.quadrature import mvnquad
from gpflow.test_util import session_context
class MomentMatchingSVGP(gpflow.models.SVGP):
@gpflow.params_as_tensors
def uncertain_predict_f_moment_matching(self, Xmu, Xcov):
return uncertain_conditional(
Xmu, Xcov, self.feature, self.kern, self.q_mu, self.q_sqrt,
mean_function=self.mean_function, white=self.whiten,
full_cov_output=self.full_cov_output)
def uncertain_predict_f_monte_carlo(self, Xmu, Xchol, mc_iter=int(1e6)):
rng = np.random.RandomState(0)
D_in = Xchol.shape[0]
X_samples = Xmu + np.reshape(
Xchol[None, :, :] @ rng.randn(mc_iter, D_in)[:, :, None], [mc_iter, D_in])
F_mu, F_var = self.predict_f(X_samples)
F_samples = F_mu + rng.randn(*F_var.shape) * (F_var ** 0.5)
mean = np.mean(F_samples, axis=0)
covar = np.cov(F_samples.T)
return mean, covar
def gen_L(rng, n, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(rng, D_out, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)])
def mean_function_factory(rng, mean_function_name, D_in, D_out):
if mean_function_name == "Zero":
return gpflow.mean_functions.Zero(output_dim=D_out)
elif mean_function_name == "Constant":
return gpflow.mean_functions.Constant(c=rng.rand(D_out))
elif mean_function_name == "Linear":
return gpflow.mean_functions.Linear(
A=rng.rand(D_in, D_out), b=rng.rand(D_out))
else:
return None
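# Usage sketch (illustrative): mean_function_factory(rng, "Linear", D_in=2, D_out=3)
# returns a gpflow Linear mean function with random A (2x3) and b (3,); an
# unrecognized name yields None, which the models treat as a zero mean.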
class Data:
N = 7
N_new = 2
D_out = 3
D_in = 1
rng = np.random.RandomState(1)
X = np.linspace(-5, 5, N)[:, None] + rng.randn(N, 1)
Y = np.hstack([np.sin(X), np.cos(X), X**2])
Xnew_mu = rng.randn(N_new, 1)
Xnew_covar = np.zeros((N_new, 1, 1))
class DataMC1(Data):
Y = np.hstack([np.sin(Data.X), np.sin(Data.X) * 2, Data.X ** 2])
class DataMC2(Data):
N = 7
N_new = 5
D_out = 4
D_in = 2
X = Data.rng.randn(N, D_in)
Y = np.hstack([np.sin(X), np.sin(X)])
Xnew_mu = Data.rng.randn(N_new, D_in)
L = gen_L(Data.rng, N_new, D_in, D_in)
Xnew_covar = np.array([l @ l.T for l in L])
class DataQuadrature:
num_data = 10
num_ind = 10
D_in = 2
D_out = 3
H = 150
rng = np.random.RandomState(1)
Xmu = rng.randn(num_data, D_in)
L = gen_L(rng, num_data, D_in, D_in)
Xvar = np.array([l @ l.T for l in L])
Z = rng.randn(num_ind, D_in)
q_mu = rng.randn(num_ind, D_out)
q_sqrt = gen_q_sqrt(rng, D_out, num_ind, num_ind)
@classmethod
def tensors(cls, white, mean_name):
float_type = settings.float_type
Xmu = tf.placeholder(float_type, [cls.num_data, cls.D_in])
Xvar = tf.placeholder(float_type, [cls.num_data, cls.D_in, cls.D_in])
q_mu = tf.placeholder(float_type, [cls.num_ind, cls.D_out])
q_sqrt = tf.placeholder(float_type, [cls.D_out, cls.num_ind, cls.num_ind])
kern = gpflow.kernels.RBF(cls.D_in)
feat = gpflow.features.InducingPoints(cls.Z)
mean_function = mean_function_factory(cls.rng, mean_name, cls.D_in, cls.D_out)
effective_mean = mean_function or (lambda X: 0.0)
feed_dict = {
Xmu: cls.Xmu,
Xvar: cls.Xvar,
q_mu: cls.q_mu,
q_sqrt: cls.q_sqrt
}
def mean_fn(X):
mean, _ = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return mean + effective_mean(X)
def var_fn(X):
_, var = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return var
def mean_sq_fn(X):
mean, _ = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return (mean + effective_mean(X)) ** 2
Collection = namedtuple('QuadratureCollection',
'Xmu,Xvar,q_mu,q_sqrt,'
'kern,feat,mean_function,'
'feed_dict,mean_fn,'
'var_fn,mean_sq_fn')
return Collection(Xmu=Xmu,
Xvar=Xvar,
q_mu=q_mu,
q_sqrt=q_sqrt,
kern=kern,
feat=feat,
mean_function=mean_function,
feed_dict=feed_dict,
mean_fn=mean_fn,
var_fn=var_fn,
mean_sq_fn=mean_sq_fn)
MEANS = ["Constant", "Linear", "Zero", None]
@pytest.mark.parametrize('white', [True, False])
@pytest.mark.parametrize('mean', MEANS)
def test_no_uncertainty(white, mean):
with session_context() as sess:
m = mean_function_factory(Data.rng, mean, Data.D_in, Data.D_out)
k = gpflow.kernels.RBF(1, variance=Data.rng.rand())
model = MomentMatchingSVGP(
Data.X, Data.Y, k, gpflow.likelihoods.Gaussian(),
mean_function=m, Z=Data.X.copy(), whiten=white)
model.full_cov_output = False
gpflow.train.AdamOptimizer().minimize(model, maxiter=50)
mean1, var1 = model.predict_f(Data.Xnew_mu)
pred_mm = model.uncertain_predict_f_moment_matching(
tf.constant(Data.Xnew_mu), tf.constant(Data.Xnew_covar))
mean2, var2 = sess.run(pred_mm)
assert_almost_equal(mean1, mean2)
for n in range(Data.N_new):
assert_almost_equal(var1[n, :], var2[n, ...])
@pytest.mark.parametrize('white', [True, False])
@pytest.mark.parametrize('mean', MEANS)
def test_monte_carlo_1_din(white, mean):
with session_context() as sess:
k = gpflow.kernels.RBF(1, variance=DataMC1.rng.rand())
m = mean_function_factory(DataMC1.rng, mean, DataMC1.D_in, DataMC1.D_out)
model = MomentMatchingSVGP(
DataMC1.X, DataMC1.Y, k, gpflow.likelihoods.Gaussian(),
            mean_function=m, Z=DataMC1.X.copy(), whiten=white)
model.full_cov_output = True
gpflow.train.AdamOptimizer().minimize(model, maxiter=50)
pred_mm = model.uncertain_predict_f_moment_matching(
tf.constant(DataMC1.Xnew_mu), tf.constant(DataMC1.Xnew_covar))
mean1, var1 = sess.run(pred_mm)
for n in range(DataMC1.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC1.Xnew_mu[n, ...],
DataMC1.Xnew_covar[n, ...] ** 0.5)
assert_almost_equal(mean1[n, ...], mean2, decimal=3)
assert_almost_equal(var1[n, ...], var2, decimal=2)
@pytest.mark.parametrize('white', [True, False])
@pytest.mark.parametrize('mean', MEANS)
def test_monte_carlo_2_din(white, mean):
with session_context() as sess:
k = gpflow.kernels.RBF(DataMC2.D_in, variance=DataMC2.rng.rand())
m = mean_function_factory(DataMC2.rng, mean, DataMC2.D_in, DataMC2.D_out)
model = MomentMatchingSVGP(
DataMC2.X, DataMC2.Y, k, gpflow.likelihoods.Gaussian(),
mean_function=m, Z=DataMC2.X.copy(), whiten=white)
model.full_cov_output = True
gpflow.train.AdamOptimizer().minimize(model)
pred_mm = model.uncertain_predict_f_moment_matching(
tf.constant(DataMC2.Xnew_mu), tf.constant(DataMC2.Xnew_covar))
mean1, var1 = sess.run(pred_mm)
for n in range(DataMC2.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC2.Xnew_mu[n, ...],
DataMC2.L[n, ...])
assert_almost_equal(mean1[n, ...], mean2, decimal=2)
            assert_almost_equal(var1[n, ...], var2, decimal=2)
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
#import phasor.numerics.dispatched as dmath
#import sympy
import declarative
from ..base.autograft import (
invalidate_auto,
Element,
)
from .utils import (
TargetLeft,
TargetRight,
TargetIdx,
matrix_space,
)
from . import bases
from . import space
from . import standard_attrs as attrs
class System(
bases.MatrixAtsCompositeBase,
):
@declarative.mproperty
def _internal(self):
return Element()
_loc_default = ('loc_m', None)
loc_m = attrs.generate_loc_m()
_boundary_left_default = ('boundary_left_m', None)
boundary_left = attrs.generate_boundary_left_m()
_boundary_right_default = ('boundary_right_m', None)
boundary_right = attrs.generate_boundary_right_m()
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def offset_m(self, arg = declarative.NOARG):
return 0
if arg is declarative.NOARG:
if self.component_pos_pairings.any_abs_pos:
arg = self.positions_list[0]
else:
arg = None
return arg
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def width_m(self):
return self.positions_list[-1] - self.positions_list[0]
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def components(self, comp_list = declarative.NOARG):
if comp_list is not declarative.NOARG:
for component in comp_list:
if isinstance(component, declarative.PropertyTransforming):
with self.building:
self.insert(component)
else:
with self.building:
self.insert(
obj = component.replica_generate(),
name = component.name_child,
)
self.root._complete()
loc_ch_list = []
for name, ch in list(self._registry_children.items()):
if isinstance(ch, bases.MatrixAtsBase):
if ch.loc_m.ref is not None:
loc_ch_list.append((ch.loc_m.ref, ch))
loc_ch_list.sort()
#print(loc_ch_list)
clist = [ch for loc, ch in loc_ch_list]
return clist
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def component_pos_pairings(self):
with self.building:
try:
del self.ctree['_internal']
except KeyError:
pass
components_pos = []
components_filled = []
loc_m = 0
loc_m_prev = None
pos_list = []
if self.boundary_left_m.ref is not None:
for idx, comp in enumerate(self.components):
if comp.loc_m.ref >= self.boundary_left_m.ref:
lslice = idx
break
else:
if comp.loc_m.ref + comp.width_m >= self.boundary_left_m.ref:
raise NotImplementedError("currently does not support system truncation that cuts objects (including subsystems)")
else:
#if all of the components are behind the truncation, then index outside the component list
lslice = idx + 1
else:
lslice = None
if self.boundary_right_m.ref is not None:
for idx, comp in enumerate(reversed(self.components)):
if comp.loc_m.ref <= self.boundary_right_m.ref:
if comp.loc_m.ref + comp.width_m >= self.boundary_right_m.ref:
raise NotImplementedError("currently does not support system truncation that cuts objects (including subsystems)")
#this gives the actual (non reversed) index + 1
rslice = len(self.components) - idx
break
else:
#if all of the components are behind the truncation, then index outside the component list
rslice = 0
else:
rslice = None
if not self.env_reversed:
comp_iter = iter(self.components[slice(lslice, rslice)])
else:
comp_iter = iter(reversed(self.components[slice(lslice, rslice)]))
for idx, comp in enumerate(comp_iter):
loc_m = None
if comp.loc_m.val is not None:
#builds using negative indices when reversed
if not self.env_reversed:
loc_m = comp.loc_m.val
else:
loc_m = -comp.width_m - comp.loc_m.val
#TODO make this typesafe for casadi MX
#if idx != 0 and loc_m < loc_m_prev:
# print("OUT OF SEQ: ", comp, loc_m, loc_m_prev)
# raise RuntimeError("Objects stacked out of sequence. Maybe you meant to insert objects into an inner system.")
#TODO
if loc_m is not None:
#put in a space to make up the gap
if idx != 0:
pos_list.append(loc_m_prev)
name = 'auto_space{0}'.format(idx)
components_filled.append(
self._internal.insert(
obj = space.Space(
L_m = loc_m - loc_m_prev,
loc_m = loc_m,
#ctree = self.ctree['internal'][name],
),
name = name,
invalidate = False,
)
)
elif (
(lslice is not None and not self.env_reversed) or
(rslice is not None and self.env_reversed)
) :
#put in a space for the gap the truncation edge
if self.env_reversed:
loc_m_prev = -self.boundary_right_m.val
else:
loc_m_prev = self.boundary_left_m.val
pos_list.append(loc_m_prev)
name = 'auto_space{0}'.format(idx)
components_filled.append(
self._internal.insert(
obj = space.Space(
L_m = loc_m - loc_m_prev,
loc_m = loc_m,
#ctree = self.ctree['internal'][name],
),
name = name,
invalidate = False,
)
)
else:
loc_m = loc_m_prev
pos_list.append(loc_m)
components_pos.append(loc_m)
components_filled.append(comp)
#print comp, loc_m
loc_m += comp.width_m
loc_m_prev = loc_m
#now add the final space if there is a truncation
if (
(rslice is not None and not self.env_reversed) or
(lslice is not None and self.env_reversed)
) :
#put in a space for the gap the truncation edge
if self.env_reversed:
loc_m = -self.boundary_left_m.val
else:
loc_m = self.boundary_right_m.val
pos_list.append(loc_m_prev)
name = 'auto_space{0}'.format(idx + 1)
components_filled.append(
self._internal.insert(
obj = space.Space(
L_m = loc_m - loc_m_prev,
loc_m = loc_m,
#ctree = self.ctree['internal'][name],
),
name = name,
invalidate = False,
)
)
pos_list.append(loc_m)
pos_list = np.asarray(pos_list) - pos_list[0]
return declarative.Bunch(
positions = pos_list,
filled = components_filled,
components_pos = components_pos,
)
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def positions_list(self):
return self.component_pos_pairings.positions
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def filled_list(self):
return self.component_pos_pairings.filled
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def component_matrix_list(self):
mat = np.eye(2)
mat_list = [mat]
for comp in self.filled_list:
mat = comp.matrix * mat
mat_list.append(mat)
return mat_list
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def matrix(self):
return self.component_matrix_list[-1]
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def matrix_inv(self):
mat = np.eye(2)
for comp in reversed(self.filled_list):
#print comp.matrix * comp.matrix_inv
#print comp
mat = comp.matrix_inv * mat
return mat
@declarative.mproperty(simple_delete = True)
@invalidate_auto
def _matrix_between_memomap(self):
return {}
def matrix_between(self, tidx1, tidx2):
result = self._matrix_between_memomap.get((tidx1, tidx2), None)
if result is not None:
return result
if tidx1 == TargetLeft:
if tidx2 == TargetLeft:
mat = np.eye(2)
elif tidx2 == TargetRight:
mat = self.matrix
else:
tidx2_outer = tidx2[-1]
tidx2_inner = TargetIdx(tidx2[:-1])
mat = np.eye(2)
for comp in self.filled_list[:tidx2_outer]:
mat = comp.matrix * mat
mat = self.filled_list[tidx2_outer].matrix_between(TargetLeft, tidx2_inner) * mat
elif tidx1 == TargetRight:
if tidx2 == TargetLeft:
mat = self.matrix**(-1)
elif tidx2 == TargetRight:
                mat = np.eye(2)
# License: BSD 3 clause
import unittest
from numpy.testing import assert_almost_equal
import numpy as np
from tick.prox import ProxTV
from tick.prox.tests.prox import TestProx
class ProxTVTest(object):
def test_ProxTV(self):
"""...Test of ProxTV
"""
coeffs = self.coeffs.copy().astype(self.dtype)
l_tv = 0.5
t = 1.7
out = np.array([
-0.40102846, -0.40102846, -0.40102846, -0.31364696, -0.31364696,
1.03937619, 1.03937619, 1.03937619, -0.21598253, -0.21598253
])
prox = ProxTV(l_tv).astype(self.dtype)
        val = l_tv * np.abs(coeffs[1:] - coeffs[:-1]).sum()
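        # Completion sketch (the source was truncated here): compare the penalty
        # value and the proximal step against the reference vector `out`; the
        # tick call signatures and tolerances below are assumptions.
        assert_almost_equal(prox.value(coeffs), val, decimal=4)
        assert_almost_equal(prox.call(coeffs, step=t), out, decimal=4)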
# import system packages
import os
from datetime import datetime
import sys
import argparse
import traceback
import pickle
import yaml
from argparse import ArgumentParser, RawTextHelpFormatter
# import numpy
import numpy as np
import healpy as hp
# the MPI comunicator class, customized for pycal data storage
from pycal.mpi import get_world, Comm
# the Data class
from pycal.dist import distribute_uniform, Data
# some pycal utils to share informations with the environment
from pycal.utils import Logger, Environment, memreport
# some timers
from pycal.timing import function_timer, GlobalTimers, Timer, gather_timers
from pycal.timing import dump as dump_timing
# the simulate the pointing, the atmosphere and put all the information in the TODGround class
from pycal.todmap import TODGround, OpSimAtmosphere, OpPointingHpix
from pycal.weather import Weather
# Some wrappers for libaatm; they solve the radiative transfer equation in local thermodynamic equilibrium
from pycal.todmap.atm import atm_absorption_coefficient, atm_absorption_coefficient_vec
# helper functions
from pycal.tests._helpers import boresight_focalplane
import pycal.qarray as qa
# focal plane and telescope calsses
from pycal.todmap import Focalplane
from pycal.todmap import Telescope
# set up the output directory for each mc iterations
@function_timer
def setup_output(outdir, comm, mc, freq):
outpath = "{}/{:08}/{:03}".format(outdir, mc, int(freq))
if comm.world_rank == 0:
print("Creating the outpath: {}".format(outpath))
os.makedirs(outpath, exist_ok=True)
return outpath
def load_focalplane(args, comm):
focalplane = None
# Load focalplane information
if comm.comm_world is None or comm.comm_world.rank == 0:
if focalplane is None:
detector_data = {}
with open(r'./strip_focal_plane.yaml') as file:
                focalplane = yaml.load(file, Loader=yaml.SafeLoader)
            detectors = focalplane['horns'].keys()
            for i in detectors:
directions=focalplane['horns'][i]['orientation']
l=np.arctan(directions[0]/directions[2])
u=np.arctan(directions[1]/directions[2])
zaxis = np.array([0, 0, 1], dtype=np.float64)
angrot = qa.rotation(zaxis, 0 * np.pi / 180.0)
wx = np.rad2deg(l) * np.pi / 180.0
wy = np.rad2deg(u) * np.pi / 180.0
                wz = np.sqrt(1.0 - (wx * wx + wy * wy))
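                # Continuation sketch (the source was truncated here): turn the
                # (wx, wy, wz) direction into a detector quaternion in the usual
                # toast/pycal style; qa.from_vectors and the dict layout are
                # assumptions rather than the original code.
                wdir = np.array([wx, wy, wz])
                detector_data[i] = {"quat": qa.mult(qa.from_vectors(zaxis, wdir), angrot)}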
import pyximport; pyximport.install()
import sys
import pandas as pd
from pathlib import Path
from gluonts.model.predictor import Predictor
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.dataset.common import ListDataset
import talib
import numpy as np
import warnings
import scipy.signal as sc
warnings.filterwarnings("ignore")
def load_dataset(filename):
dataset = pd.read_csv(filename, usecols = [0, 1, 5], header=0)
dataset = dataset.dropna()
dataset.columns = dataset.columns.to_series().apply(lambda x: x.strip())
df = dataset
if 'timestamp' not in df.columns:
df['timestamp'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
timestamp = df.pop("timestamp")
df.insert(0, timestamp.name, timestamp)
df.drop(columns=['Date', 'Time'], inplace=True, errors='ignore')
dataset = df
features_to_normalize = ['close']
dataset[features_to_normalize] = dataset[features_to_normalize].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
return dataset
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[int(result.size/2):]
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
#assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
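# Usage sketch (illustrative): both autocorrelation helpers expect a 1-D numpy
# array, e.g. r = estimated_autocorrelation(df['close'].values); r[0] is 1.0 by
# construction and r[k] estimates the lag-k autocorrelation.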
if __name__ == "__main__":
filename = sys.argv[1]
df = load_dataset(filename)
test_data = ListDataset(
[{"start": df.index[1], "target": df.values[-12:, 1]}],
freq="1 min"
)
predictor = Predictor.deserialize(Path("."))
for test_entry, forecast in zip(test_data, predictor.predict(test_data)):
print("[", forecast.mean[0], end = " ")
np.seterr(divide='ignore')
tsf5 = talib.TSF(df['close'].values, timeperiod=5)
diff5 = np.diff(tsf5) / np.diff(df['close'].values)
diff5 = np.insert(diff5, 0, 1)
diff5 = np.diff(diff5) / np.diff(df['close'].values)
tsf15 = talib.TSF(df['close'].values, timeperiod=15)
        diff15 = np.diff(tsf15) / np.diff(df['close'].values)
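        # Continuation sketch mirroring the 5-period block above (the source was
        # truncated here); the closing print is an assumption pairing with the
        # "[" opened earlier.
        diff15 = np.insert(diff15, 0, 1)
        diff15 = np.diff(diff15) / np.diff(df['close'].values)
        print(diff5[-1], diff15[-1], "]")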
#!/usr/bin/env python3
"""Training and evaluation entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import argparse
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.framework import dtypes
from scipy.spatial import KDTree
from common.util import Dataset
from common.util import ACTIVATION_MAP
from tqdm import trange
import pathlib
import logging
from common.util import summary_writer
from common.gen_experiments import load_and_save_params
import time
import pickle as pkl
tf.logging.set_verbosity(tf.logging.INFO)
logging.basicConfig(level=logging.INFO)
def _load_mini_imagenet(data_dir, split):
"""Load mini-imagenet from numpy's npz file format."""
_split_tag = {'sources': 'train', 'target_val': 'val', 'target_tst': 'test'}[split]
dataset_path = os.path.join(data_dir, 'few-shot-{}.npz'.format(_split_tag))
logging.info("Loading mini-imagenet...")
data = np.load(dataset_path)
fields = data['features'], data['targets']
logging.info("Done loading.")
return fields
def get_image_size(data_dir):
    if 'mini-imagenet' in data_dir or 'tiered' in data_dir:
image_size = 84
elif 'cifar' in data_dir:
image_size = 32
else:
raise Exception('Unknown dataset: %s' % data_dir)
return image_size
class Namespace(object):
def __init__(self, adict):
self.__dict__.update(adict)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
choices=['train', 'eval', 'test', 'train_classifier', 'create_embedding'])
# Dataset parameters
parser.add_argument('--data_dir', type=str, default=None, help='Path to the data.')
parser.add_argument('--data_split', type=str, default='sources', choices=['sources', 'target_val', 'target_tst'],
help='Split of the data to be used to perform operation.')
# Training parameters
parser.add_argument('--number_of_steps', type=int, default=int(30000),
help="Number of training steps (number of Epochs in Hugo's paper)")
parser.add_argument('--number_of_steps_to_early_stop', type=int, default=int(1000000),
help="Number of training steps after half way to early stop the training")
parser.add_argument('--log_dir', type=str, default='', help='Base log dir')
parser.add_argument('--num_classes_train', type=int, default=5,
help='Number of classes in the train phase, this is coming from the prototypical networks')
parser.add_argument('--num_shots_train', type=int, default=5,
help='Number of shots in a few shot meta-train scenario')
parser.add_argument('--train_batch_size', type=int, default=32, help='Training batch size.')
parser.add_argument('--num_tasks_per_batch', type=int, default=2,
help='Number of few shot tasks per batch, so the task encoding batch is num_tasks_per_batch x num_classes_test x num_shots_train .')
parser.add_argument('--init_learning_rate', type=float, default=0.1, help='Initial learning rate.')
parser.add_argument('--save_summaries_secs', type=int, default=60, help='Time between saving summaries')
parser.add_argument('--save_interval_secs', type=int, default=60, help='Time between saving model?')
parser.add_argument('--optimizer', type=str, default='sgd', choices=['sgd', 'adam'])
parser.add_argument('--augment', type=bool, default=False)
# Learning rate paramteres
parser.add_argument('--lr_anneal', type=str, default='pwc', choices=['const', 'pwc', 'cos', 'exp'])
parser.add_argument('--n_lr_decay', type=int, default=3)
parser.add_argument('--lr_decay_rate', type=float, default=10.0)
parser.add_argument('--num_steps_decay_pwc', type=int, default=2500,
help='Decay learning rate every num_steps_decay_pwc')
parser.add_argument('--clip_gradient_norm', type=float, default=1.0, help='gradient clip norm.')
parser.add_argument('--weights_initializer_factor', type=float, default=0.1,
help='multiplier in the variance of the initialization noise.')
# Evaluation parameters
parser.add_argument('--max_number_of_evaluations', type=float, default=float('inf'))
parser.add_argument('--eval_interval_secs', type=int, default=120, help='Time between evaluating model?')
parser.add_argument('--eval_interval_steps', type=int, default=1000,
help='Number of train steps between evaluating model in the training loop')
parser.add_argument('--eval_interval_fine_steps', type=int, default=250,
help='Number of train steps between evaluating model in the training loop in the final phase')
# Test parameters
parser.add_argument('--num_classes_test', type=int, default=5, help='Number of classes in the test phase')
parser.add_argument('--num_shots_test', type=int, default=5,
help='Number of shots in a few shot meta-test scenario')
parser.add_argument('--num_cases_test', type=int, default=100000,
help='Number of few-shot cases to compute test accuracy')
# Architecture parameters
parser.add_argument('--dropout', type=float, default=1.0)
parser.add_argument('--conv_dropout', type=float, default=None)
parser.add_argument('--feature_dropout_p', type=float, default=None)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--num_filters', type=int, default=64)
parser.add_argument('--num_units_in_block', type=int, default=3)
parser.add_argument('--num_blocks', type=int, default=4)
parser.add_argument('--num_max_pools', type=int, default=3)
parser.add_argument('--block_size_growth', type=float, default=2.0)
parser.add_argument('--activation', type=str, default='swish-1', choices=['relu', 'selu', 'swish-1'])
parser.add_argument('--feature_expansion_size', type=int, default=None)
parser.add_argument('--feature_bottleneck_size', type=int, default=None)
parser.add_argument('--feature_extractor', type=str, default='simple_res_net',
choices=['simple_res_net'], help='Which feature extractor to use')
parser.add_argument('--encoder_sharing', type=str, default='shared',
choices=['shared'],
help='How to link fetaure extractors in task encoder and classifier')
parser.add_argument('--encoder_classifier_link', type=str, default='prototypical',
choices=['prototypical'],
help='How to link feature extractors in task encoder and classifier')
parser.add_argument('--embedding_pooled', type=bool, default=True,
help='Whether to use avg pooling to create embedding')
parser.add_argument('--task_encoder', type=str, default='self_att_mlp',
choices=['class_mean', 'fixed_alpha','fixed_alpha_mlp','self_att_mlp'])
#
parser.add_argument('--num_batches_neg_mining', type=int, default=0)
parser.add_argument('--eval_batch_size', type=int, default=100, help='Evaluation batch size')
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--mlp_weight_decay', type=float, default=0.0)
parser.add_argument('--mlp_dropout', type=float, default=0.0)
parser.add_argument('--mlp_type', type=str, default='non-linear')
parser.add_argument('--att_input', type=str, default='word')
args = parser.parse_args()
print(args)
return args
def get_logdir_name(flags):
"""Generates the name of the log directory from the values of flags
Parameters
----------
flags: neural net architecture generated by get_arguments()
Outputs
-------
the name of the directory to store the training and evaluation results
"""
logdir = flags.log_dir
return logdir
class ScaledVarianceRandomNormal(init_ops.Initializer):
"""Initializer that generates tensors with a normal distribution scaled as per https://arxiv.org/pdf/1502.01852.pdf.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.factor = factor
self.seed = seed
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
if shape:
n = float(shape[-1])
else:
n = 1.0
for dim in shape[:-2]:
n *= float(dim)
self.stddev = np.sqrt(self.factor * 2.0 / n)
return random_ops.random_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
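# Hedged sanity-check sketch (not wired into training; names illustrative):
# for fan-in n the initializer above draws weights with stddev
# sqrt(factor * 2 / n), the He-style scaling from the linked paper.
#
#   import numpy as np
#   n_in, factor = 64, 0.1
#   w = np.random.normal(0.0, np.sqrt(factor * 2.0 / n_in), size=(10000, n_in))
#   print(np.std(w))   # ~= sqrt(0.1 * 2 / 64) ~= 0.0559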
def _get_scope(is_training, flags):
normalizer_params = {
'epsilon': 0.001,
'momentum': .95,
'trainable': is_training,
'training': is_training,
}
conv2d_arg_scope = slim.arg_scope(
[slim.conv2d, slim.fully_connected],
activation_fn=ACTIVATION_MAP[flags.activation],
normalizer_fn=tf.layers.batch_normalization,
normalizer_params=normalizer_params,
# padding='SAME',
trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=flags.weight_decay),
weights_initializer=ScaledVarianceRandomNormal(factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0)
)
dropout_arg_scope = slim.arg_scope(
[slim.dropout],
keep_prob=flags.dropout,
is_training=is_training)
return conv2d_arg_scope, dropout_arg_scope
def build_simple_conv_net(images, flags, is_training, reuse=None, scope=None):
conv2d_arg_scope, dropout_arg_scope = _get_scope(is_training, flags)
with conv2d_arg_scope, dropout_arg_scope:
with tf.variable_scope(scope or 'feature_extractor', reuse=reuse):
h = images
for i in range(4):
h = slim.conv2d(h, num_outputs=flags.num_filters, kernel_size=3, stride=1,
scope='conv' + str(i), padding='SAME',
weights_initializer=ScaledVarianceRandomNormal(factor=flags.weights_initializer_factor))
h = slim.max_pool2d(h, kernel_size=2, stride=2, padding='VALID', scope='max_pool' + str(i))
if flags.embedding_pooled == True:
kernel_size = h.shape.as_list()[-2]
h = slim.avg_pool2d(h, kernel_size=kernel_size, scope='avg_pool')
h = slim.flatten(h)
return h
def leaky_relu(x, alpha=0.1, name=None):
return tf.maximum(x, alpha * x, name=name)
def build_simple_res_net(images, flags, num_filters, beta=None, gamma=None, is_training=False, reuse=None, scope=None):
conv2d_arg_scope, dropout_arg_scope = _get_scope(is_training, flags)
activation_fn = ACTIVATION_MAP[flags.activation]
with conv2d_arg_scope, dropout_arg_scope:
with tf.variable_scope(scope or 'feature_extractor', reuse=reuse):
h = images
for i in range(len(num_filters)):
# make shortcut
shortcut = slim.conv2d(h, num_outputs=num_filters[i], kernel_size=1, stride=1,
activation_fn=None,
scope='shortcut' + str(i), padding='SAME')
for j in range(flags.num_units_in_block):
h = slim.conv2d(h, num_outputs=num_filters[i], kernel_size=3, stride=1,
scope='conv' + str(i) + '_' + str(j), padding='SAME', activation_fn=None)
if flags.conv_dropout:
h = slim.dropout(h, keep_prob=1.0 - flags.conv_dropout)
if j < (flags.num_units_in_block - 1):
h = activation_fn(h, name='activation_' + str(i) + '_' + str(j))
h = h + shortcut
h = activation_fn(h, name='activation_' + str(i) + '_' + str(flags.num_units_in_block - 1))
if i < flags.num_max_pools:
h = slim.max_pool2d(h, kernel_size=2, stride=2, padding='SAME', scope='max_pool' + str(i))
if flags.feature_expansion_size:
if flags.feature_dropout_p:
h = slim.dropout(h, scope='feature_expansion_dropout', keep_prob=1.0 - flags.feature_dropout_p)
h = slim.conv2d(slim.dropout(h), num_outputs=flags.feature_expansion_size, kernel_size=1, stride=1,
scope='feature_expansion', padding='SAME')
if flags.embedding_pooled == True:
kernel_size = h.shape.as_list()[-2]
h = slim.avg_pool2d(h, kernel_size=kernel_size, scope='avg_pool')
h = slim.flatten(h)
if flags.feature_dropout_p:
h = slim.dropout(h, scope='feature_bottleneck_dropout', keep_prob=1.0 - flags.feature_dropout_p)
# Bottleneck layer
if flags.feature_bottleneck_size:
h = slim.fully_connected(h, num_outputs=flags.feature_bottleneck_size,
activation_fn=activation_fn, normalizer_fn=None,
scope='feature_bottleneck')
return h
def build_wordemb_transformer(embeddings, flags, is_training=False, reuse=None, scope=None):
with tf.variable_scope(scope or 'mlp_transformer', reuse=reuse):
h = embeddings
if flags.mlp_type=='linear':
h = slim.fully_connected(h, 512, reuse=False, scope='mlp_layer',
activation_fn=None, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
elif flags.mlp_type=='non-linear':
h = slim.fully_connected(h, 300, reuse=False, scope='mlp_layer',
activation_fn=tf.nn.relu, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(
scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(
factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
h = slim.dropout(h, scope='mlp_dropout', keep_prob=1.0 - flags.mlp_dropout, is_training=is_training)
h = slim.fully_connected(h, 512, reuse=False, scope='mlp_layer_1',
activation_fn=None, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(
scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(
factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
return h
def build_self_attention(embeddings, flags, is_training=False, reuse=None, scope=None):
with tf.variable_scope(scope or 'self_attention', reuse=reuse):
h = embeddings
if flags.mlp_type=='linear':
h = slim.fully_connected(h, 1, reuse=False, scope='self_att_layer',
activation_fn=None, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
elif flags.mlp_type=='non-linear':
h = slim.fully_connected(h, 300, reuse=False, scope='self_att_layer',
activation_fn=tf.nn.relu, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(
scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(
factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
h = slim.dropout(h, scope='self_att_dropout', keep_prob=1.0 - flags.mlp_dropout, is_training=is_training)
h = slim.fully_connected(h, 1, reuse=False, scope='self_att_layer_1',
activation_fn=None, trainable=is_training,
weights_regularizer=tf.contrib.layers.l2_regularizer(
scale=flags.mlp_weight_decay),
weights_initializer=ScaledVarianceRandomNormal(
factor=flags.weights_initializer_factor),
biases_initializer=tf.constant_initializer(0.0))
h = tf.sigmoid(h)
return h
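# Note: the sigmoid above squashes the MLP output to alpha in (0, 1); in
# build_task_encoder this gate convexly mixes the visual prototype with the
# word embedding per class, e.g. (illustrative):
#
#   task_encoding = alpha * task_encoding + (1 - alpha) * label_embeddings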
def get_res_net_block(h, flags, num_filters, num_units, pool=False, is_training=False,
reuse=None, scope=None):
conv2d_arg_scope, dropout_arg_scope = _get_scope(is_training, flags)
activation_fn = ACTIVATION_MAP[flags.activation]
with conv2d_arg_scope, dropout_arg_scope:
with tf.variable_scope(scope, reuse=reuse):
# make shortcut
shortcut = slim.conv2d(h, num_outputs=num_filters, kernel_size=1, stride=1,
activation_fn=None,
scope='shortcut', padding='SAME')
for j in range(num_units):
h = slim.conv2d(h, num_outputs=num_filters, kernel_size=3, stride=1,
scope='conv_' + str(j), padding='SAME', activation_fn=None)
if flags.conv_dropout:
h = slim.dropout(h, keep_prob=1.0 - flags.conv_dropout)
if j < (num_units - 1):
h = activation_fn(h, name='activation_' + str(j))
h = h + shortcut
h = activation_fn(h, name='activation_' + str(flags.num_units_in_block - 1))
if pool:
h = slim.max_pool2d(h, kernel_size=2, stride=2, padding='SAME', scope='max_pool')
return h
def build_feature_extractor_graph(images, flags, num_filters, beta=None, gamma=None, is_training=False,
scope='feature_extractor_task_encoder', reuse=None, is_64way=False):
if flags.feature_extractor == 'simple_conv_net':
h = build_simple_conv_net(images, flags=flags, is_training=is_training, reuse=reuse, scope=scope)
elif flags.feature_extractor == 'simple_res_net':
h = build_simple_res_net(images, flags=flags, num_filters=num_filters, beta=beta, gamma=gamma,
is_training=is_training, reuse=reuse, scope=scope)
else:
h = None
embedding_shape = h.get_shape().as_list()
if is_training and is_64way is False:
h = tf.reshape(h, shape=(flags.num_tasks_per_batch, embedding_shape[0] // flags.num_tasks_per_batch, -1),
name='reshape_to_separate_tasks_generic_features')
else:
h = tf.reshape(h, shape=(1, embedding_shape[0], -1),
name='reshape_to_separate_tasks_generic_features')
return h
def build_task_encoder(embeddings, label_embeddings, flags, is_training, querys=None, reuse=None, scope='class_encoder'):
conv2d_arg_scope, dropout_arg_scope = _get_scope(is_training, flags)
alpha=None
with conv2d_arg_scope, dropout_arg_scope:
with tf.variable_scope(scope, reuse=reuse):
if flags.task_encoder == 'talkthrough':
task_encoding = embeddings
elif flags.task_encoder == 'class_mean':
task_encoding = embeddings
if is_training:
task_encoding = tf.reshape(task_encoding, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, flags.num_shots_train, -1),
name='reshape_to_separate_tasks_task_encoding')
else:
task_encoding = tf.reshape(task_encoding,
shape=(1, flags.num_classes_test, flags.num_shots_test, -1),
name='reshape_to_separate_tasks_task_encoding')
task_encoding = tf.reduce_mean(task_encoding, axis=2, keep_dims=False)
elif flags.task_encoder == 'fixed_alpha':
task_encoding = embeddings
print("entered the word embedding task encoder...")
if is_training:
task_encoding = tf.reshape(task_encoding, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, flags.num_shots_train, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, -1),
name='reshape_to_separate_tasks_label_embedding')
else:
task_encoding = tf.reshape(task_encoding,
shape=(1, flags.num_classes_test, flags.num_shots_test, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings,
shape=(1, flags.num_classes_test, -1),
name='reshape_to_separate_tasks_label_embedding')
task_encoding = tf.reduce_mean(task_encoding, axis=2, keep_dims=False)
task_encoding = flags.alpha*task_encoding+(1-flags.alpha)*label_embeddings
elif flags.task_encoder == 'fixed_alpha_mlp':
task_encoding = embeddings
print("entered the word embedding task encoder...")
label_embeddings = build_wordemb_transformer(label_embeddings,flags,is_training)
if is_training:
task_encoding = tf.reshape(task_encoding, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, flags.num_shots_train, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, -1),
name='reshape_to_separate_tasks_label_embedding')
else:
task_encoding = tf.reshape(task_encoding,
shape=(1, flags.num_classes_test, flags.num_shots_test, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings,
shape=(1, flags.num_classes_test, -1),
name='reshape_to_separate_tasks_label_embedding')
task_encoding = tf.reduce_mean(task_encoding, axis=2, keep_dims=False)
task_encoding = flags.alpha*task_encoding+(1-flags.alpha)*label_embeddings
elif flags.task_encoder == 'self_att_mlp':
task_encoding = embeddings
print("entered the word embedding task encoder...")
label_embeddings = build_wordemb_transformer(label_embeddings,flags,is_training)
if is_training:
task_encoding = tf.reshape(task_encoding, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, flags.num_shots_train, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, -1),
name='reshape_to_separate_tasks_label_embedding')
else:
task_encoding = tf.reshape(task_encoding,
shape=(1, flags.num_classes_test, flags.num_shots_test, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings,
shape=(1, flags.num_classes_test, -1),
name='reshape_to_separate_tasks_label_embedding')
task_encoding = tf.reduce_mean(task_encoding, axis=2, keep_dims=False)
if flags.att_input=='proto':
alpha = build_self_attention(task_encoding,flags,is_training)
elif flags.att_input=='word':
alpha = build_self_attention(label_embeddings,flags,is_training)
elif flags.att_input=='combined':
embeddings=tf.concat([task_encoding, label_embeddings], axis=2)
alpha = build_self_attention(embeddings, flags, is_training)
elif flags.att_input=='queryword':
j = label_embeddings.get_shape().as_list()[1]
i = querys.get_shape().as_list()[1]
task_encoding_tile = tf.expand_dims(task_encoding, axis=1)
task_encoding_tile = tf.tile(task_encoding_tile, (1, i, 1, 1))
querys_tile = tf.expand_dims(querys, axis=2)
querys_tile = tf.tile(querys_tile, (1, 1, j, 1))
label_embeddings_tile = tf.expand_dims(label_embeddings, axis=1)
label_embeddings_tile = tf.tile(label_embeddings_tile, (1, i, 1, 1))
att_input = tf.concat([label_embeddings_tile, querys_tile], axis=3)
alpha = build_self_attention(att_input, flags, is_training)
elif flags.att_input=='queryproto':
j = task_encoding.get_shape().as_list()[1]
i = querys.get_shape().as_list()[1]
task_encoding_tile = tf.expand_dims(task_encoding, axis=1)
task_encoding_tile = tf.tile(task_encoding_tile, (1, i, 1, 1))
querys_tile = tf.expand_dims(querys, axis=2)
querys_tile = tf.tile(querys_tile, (1, 1, j, 1))
label_embeddings_tile = tf.expand_dims(label_embeddings, axis=1)
label_embeddings_tile = tf.tile(label_embeddings_tile, (1, i, 1, 1))
att_input = tf.concat([task_encoding_tile, querys_tile], axis=3)
alpha = build_self_attention(att_input, flags, is_training)
if querys is None:
task_encoding = alpha*task_encoding+(1-alpha)*label_embeddings
else:
task_encoding = alpha * task_encoding_tile + (1-alpha) * label_embeddings_tile
else:
task_encoding = None
return task_encoding, alpha
def build_prototypical_head(features_generic, task_encoding, flags, is_training, scope='prototypical_head'):
"""
Implements the prototypical networks few-shot head
:param features_generic:
:param task_encoding:
:param flags:
:param is_training:
:param reuse:
:param scope:
:return:
"""
with tf.variable_scope(scope):
if len(features_generic.get_shape().as_list()) == 2:
features_generic = tf.expand_dims(features_generic, axis=0)
if len(task_encoding.get_shape().as_list()) == 2:
task_encoding = tf.expand_dims(task_encoding, axis=0)
# i is the number of steps in the task_encoding sequence
# j is the number of steps in the features_generic sequence
j = task_encoding.get_shape().as_list()[1]
i = features_generic.get_shape().as_list()[1]
# tile to be able to produce weight matrix alpha in (i,j) space
features_generic = tf.expand_dims(features_generic, axis=2)
task_encoding = tf.expand_dims(task_encoding, axis=1)
# features_generic changes over i and is constant over j
# task_encoding changes over j and is constant over i
task_encoding_tile = tf.tile(task_encoding, (1, i, 1, 1))
features_generic_tile = tf.tile(features_generic, (1, 1, j, 1))
# implement equation (4)
euclidian = -tf.norm(task_encoding_tile - features_generic_tile, name='neg_euclidian_distance', axis=-1)
if is_training:
euclidian = tf.reshape(euclidian, shape=(flags.num_tasks_per_batch * flags.train_batch_size, -1))
else:
euclidian_shape = euclidian.get_shape().as_list()
euclidian = tf.reshape(euclidian, shape=(euclidian_shape[1], -1))
return euclidian
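# Hedged NumPy sketch of equation (4) above (toy shapes; names illustrative):
# logits are negative Euclidean distances from each query embedding to each
# class prototype, so argmax picks the nearest prototype.
#
#   import numpy as np
#   protos = np.random.randn(5, 512)      # [num_classes, emb_dim]
#   queries = np.random.randn(32, 512)    # [batch, emb_dim]
#   logits = -np.linalg.norm(queries[:, None, :] - protos[None, :, :], axis=-1)
#   pred = logits.argmax(axis=1)          # nearest prototype per query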
def build_prototypical_head_protoperquery(features_generic, task_encoding, flags, is_training, scope='prototypical_head'):
"""
Implements the prototypical networks few-shot head with a separate set of prototypes per query
:param features_generic:
:param task_encoding:
:param flags:
:param is_training:
:param reuse:
:param scope:
:return:
"""
# the shape of task_encoding is [num_tasks, batch_size, num_classes, emb_dim]
with tf.variable_scope(scope):
if len(features_generic.get_shape().as_list()) == 2:
features_generic = tf.expand_dims(features_generic, axis=0)
if len(task_encoding.get_shape().as_list()) == 2:
task_encoding = tf.expand_dims(task_encoding, axis=0)
# i is the number of steps in the task_encoding sequence
# j is the number of steps in the features_generic sequence
j = task_encoding.get_shape().as_list()[2]
i = features_generic.get_shape().as_list()[1]
# tile to be able to produce weight matrix alpha in (i,j) space
features_generic = tf.expand_dims(features_generic, axis=2)
#task_encoding = tf.expand_dims(task_encoding, axis=1)
# features_generic changes over i and is constant over j
# task_encoding changes over j and is constant over i
features_generic_tile = tf.tile(features_generic, (1, 1, j, 1))
# implement equation (4)
euclidian = -tf.norm(task_encoding - features_generic_tile, name='neg_euclidian_distance', axis=-1)
if is_training:
euclidian = tf.reshape(euclidian, shape=(flags.num_tasks_per_batch * flags.train_batch_size, -1))
else:
euclidian_shape = euclidian.get_shape().as_list()
euclidian = tf.reshape(euclidian, shape=(euclidian_shape[1], -1))
return euclidian
def build_regularizer_head(embeddings, label_embeddings, flags, is_training, scope='regularizer_head'):
"""
Implements a regularizer head: negative Euclidean distances between class prototypes and their label (word) embeddings
:param features_generic:
:param task_encoding:
:param flags:
:param is_training:
:param reuse:
:param scope:
:return:
"""
with tf.variable_scope(scope):
task_encoding = embeddings
if is_training:
task_encoding = tf.reshape(task_encoding, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, flags.num_shots_train, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings, shape=(
flags.num_tasks_per_batch, flags.num_classes_train, -1),
name='reshape_to_separate_tasks_label_embedding')
else:
task_encoding = tf.reshape(task_encoding,
shape=(1, flags.num_classes_test, flags.num_shots_test, -1),
name='reshape_to_separate_tasks_task_encoding')
label_embeddings = tf.reshape(label_embeddings,
shape=(1, flags.num_classes_test, -1),
name='reshape_to_separate_tasks_label_embedding')
task_encoding = tf.reduce_mean(task_encoding, axis=2, keep_dims=False)
# i is the number of steps in the task_encoding sequence
# j is the number of steps in the features_generic sequence
j = task_encoding.get_shape().as_list()[1]
i = label_embeddings.get_shape().as_list()[1]
# tile to be able to produce weight matrix alpha in (i,j) space
task_encoding = tf.expand_dims(task_encoding, axis=2)
label_embeddings = tf.expand_dims(label_embeddings, axis=1)
# features_generic changes over i and is constant over j
# task_encoding changes over j and is constant over i
label_embeddings_tile = tf.tile(label_embeddings, (1, i, 1, 1))
task_encoding_tile = tf.tile(task_encoding, (1, 1, j, 1))
# implement equation (4)
euclidian = -tf.norm(task_encoding_tile - label_embeddings_tile, name='neg_euclidian_distance_regularizer', axis=-1)
if is_training:
euclidian = tf.reshape(euclidian, shape=(flags.num_tasks_per_batch * flags.num_classes_train, -1))
else:
euclidian_shape = euclidian.get_shape().as_list()
euclidian = tf.reshape(euclidian, shape=(euclidian_shape[1], -1))
return euclidian
def placeholder_inputs(batch_size, image_size, scope):
"""
:param batch_size:
:return: placeholders for images and
"""
with tf.variable_scope(scope):
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, 3), name='images')
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size), name='labels')
return images_placeholder, labels_placeholder
def get_batch(data_set, images_placeholder, labels_placeholder, batch_size):
"""
:param data_set:
:param images_placeholder:
:param labels_placeholder:
:return:
"""
images_feed, labels_feed = data_set.next_batch(batch_size)
feed_dict = {
images_placeholder: images_feed.astype(dtype=np.float32),
labels_placeholder: labels_feed,
}
return feed_dict
def preprocess(images):
# mean = tf.constant(np.asarray([127.5, 127.5, 127.5]).reshape([1, 1, 3]), dtype=tf.float32, name='image_mean')
# std = tf.constant(np.asarray([127.5, 127.5, 127.5]).reshape([1, 1, 3]), dtype=tf.float32, name='image_std')
# return tf.div(tf.subtract(images, mean), std)
std = tf.constant(np.asarray([0.5, 0.5, 0.5]).reshape([1, 1, 3]), dtype=tf.float32, name='image_std')
return tf.div(images, std)
def get_nearest_neighbour_acc(flags, embeddings, labels):
num_correct = 0
num_tot = 0
for i in trange(flags.num_cases_test):
test_classes = np.random.choice(np.unique(labels), size=flags.num_classes_test, replace=False)
train_idxs, test_idxs = get_few_shot_idxs(labels=labels, classes=test_classes, num_shots=flags.num_shots_test)
# TODO: this is to fix the OOM error, this can be removed when embed() supports batch processing
test_idxs = np.random.choice(test_idxs, size=100, replace=False)
np_embedding_train = embeddings[train_idxs]
# Using the np.std instead of np.linalg.norm improves results by around 1-1.5%
np_embedding_train = np_embedding_train / np.std(np_embedding_train, axis=1, keepdims=True)
# np_embedding_train = np_embedding_train / np.linalg.norm(np_embedding_train, axis=1, keepdims=True)
labels_train = labels[train_idxs]
np_embedding_test = embeddings[test_idxs]
np_embedding_test = np_embedding_test / np.std(np_embedding_test, axis=1, keepdims=True)
# np_embedding_test = np_embedding_test / np.linalg.norm(np_embedding_test, axis=1, keepdims=True)
labels_test = labels[test_idxs]
kdtree = KDTree(np_embedding_train)
nns, nn_idxs = kdtree.query(np_embedding_test, k=1)
labels_predicted = labels_train[nn_idxs]
num_matches = sum(labels_predicted == labels_test)
num_correct += num_matches
num_tot += len(labels_predicted)
# print("Accuracy: ", (100.0 * num_correct) / num_tot)
return (100.0 * num_correct) / num_tot
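# Minimal 1-NN sketch mirroring the loop above (toy data; scipy's cKDTree is
# assumed here, the actual KDTree import lives elsewhere in this file):
#
#   from scipy.spatial import cKDTree
#   import numpy as np
#   train = np.random.randn(100, 16); train_y = np.random.randint(5, size=100)
#   test = np.random.randn(20, 16)
#   _, nn_idx = cKDTree(train).query(test, k=1)
#   pred = train_y[nn_idx]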
def build_inference_graph(images_deploy_pl, images_task_encode_pl, flags, is_training,
is_primary, label_embeddings):
num_filters = [round(flags.num_filters * pow(flags.block_size_growth, i)) for i in range(flags.num_blocks)]
reuse = not is_primary
alpha=None
with tf.variable_scope('Model'):
feature_extractor_encoding_scope = 'feature_extractor_encoder'
features_task_encode = build_feature_extractor_graph(images=images_task_encode_pl, flags=flags,
is_training=is_training,
num_filters=num_filters,
scope=feature_extractor_encoding_scope,
reuse=False)
if flags.encoder_sharing == 'shared':
encoder_reuse = True
feature_extractor_classifier_scope = feature_extractor_encoding_scope
elif flags.encoder_sharing == 'siamese':
# TODO: in the case of pretrained feature extractor this is not good,
# because the classfier part will be randomly initialized
encoder_reuse = False
feature_extractor_classifier_scope = 'feature_extractor_classifier'
else:
raise Exception('Option not implemented')
if flags.encoder_classifier_link == 'prototypical':
#flags.task_encoder = 'class_mean'
features_generic = build_feature_extractor_graph(images=images_deploy_pl, flags=flags,
is_training=is_training,
scope=feature_extractor_classifier_scope,
num_filters=num_filters,
reuse=encoder_reuse)
querys = None
if 'query' in flags.att_input:
querys = features_generic
task_encoding, alpha = build_task_encoder(embeddings=features_task_encode,
label_embeddings=label_embeddings,
flags=flags, is_training=is_training,
reuse=reuse, querys=querys)
if 'query' in flags.att_input:
logits = build_prototypical_head_protoperquery(features_generic, task_encoding, flags,
is_training=is_training)
else:
logits = build_prototypical_head(features_generic, task_encoding, flags, is_training=is_training)
# logits_regularizer = build_regularizer_head(embeddings= features_task_encode,
# label_embeddings=label_embeddings, flags=flags,
# is_training=is_training )
else:
raise Exception('Option not implemented')
return logits, None, features_task_encode, features_generic, alpha
def get_train_datasets(flags):
mini_imagenet = _load_mini_imagenet(data_dir=flags.data_dir, split='sources')
few_shot_data_train = Dataset(mini_imagenet)
pretrain_data_train, pretrain_data_test = None, None
return few_shot_data_train, pretrain_data_train, pretrain_data_test
def get_pwc_learning_rate(global_step, flags):
learning_rate = tf.train.piecewise_constant(global_step, [np.int64(flags.number_of_steps / 2),
np.int64(
flags.number_of_steps / 2 + flags.num_steps_decay_pwc),
np.int64(
flags.number_of_steps / 2 + 2 * flags.num_steps_decay_pwc)],
[flags.init_learning_rate, flags.init_learning_rate * 0.1,
flags.init_learning_rate * 0.01,
flags.init_learning_rate * 0.001])
return learning_rate
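# Illustrative example of the piecewise-constant schedule above, assuming
# number_of_steps=30000, num_steps_decay_pwc=2500, init_learning_rate=0.1:
# boundaries fall at steps [15000, 17500, 20000] and the rate steps through
# 0.1 -> 0.01 -> 0.001 -> 0.0001.
#
#   boundaries = [15000, 17500, 20000]
#   values = [0.1, 0.01, 0.001, 0.0001]
#   lr_at = lambda step: values[sum(step >= b for b in boundaries)]
#   lr_at(16000)   # -> 0.01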
def create_hard_negative_batch(misclass, feed_dict, sess, few_shot_data_train, flags,
images_deploy_pl, labels_deploy_pl, images_task_encode_pl, labels_task_encode_pl):
"""
:param logits:
:param feed_dict:
:param sess:
:param few_shot_data_train:
:param flags:
:param images_deploy_pl:
:param labels_deploy_pl:
:param images_task_encode_pl:
:param labels_task_encode_pl:
:return:
"""
feed_dict_test = dict(feed_dict)
misclass_test_final = 0.0
misclass_history = np.zeros(flags.num_batches_neg_mining)
for i in range(flags.num_batches_neg_mining):
images_deploy, labels_deploy, images_task_encode, labels_task_encode = \
few_shot_data_train.next_few_shot_batch(deploy_batch_size=flags.train_batch_size,
num_classes_test=flags.num_classes_train,
num_shots=flags.num_shots_train,
num_tasks=flags.num_tasks_per_batch)
feed_dict_test[images_deploy_pl] = images_deploy.astype(dtype=np.float32)
feed_dict_test[labels_deploy_pl] = labels_deploy
feed_dict_test[images_task_encode_pl] = images_task_encode.astype(dtype=np.float32)
feed_dict_test[labels_task_encode_pl] = labels_task_encode
# logits
misclass_test = sess.run(misclass, feed_dict=feed_dict_test)
misclass_history[i] = misclass_test
if misclass_test > misclass_test_final:
misclass_test_final = misclass_test
feed_dict = dict(feed_dict_test)
return feed_dict
def train(flags):
log_dir = get_logdir_name(flags)
flags.pretrained_model_dir = log_dir
fout=open(log_dir+'/out','a')
log_dir = os.path.join(log_dir, 'train')
# This is setting to run evaluation loop only once
flags.max_number_of_evaluations = 1
flags.eval_interval_secs = 0
image_size = get_image_size(flags.data_dir)
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int64)
global_step_pretrain = tf.Variable(0, trainable=False, name='global_step_pretrain', dtype=tf.int64)
images_deploy_pl, labels_deploy_pl = placeholder_inputs(
batch_size=flags.num_tasks_per_batch * flags.train_batch_size,
image_size=image_size, scope='inputs/deploy')
images_task_encode_pl, _ = placeholder_inputs(
batch_size=flags.num_tasks_per_batch * flags.num_classes_train * flags.num_shots_train,
image_size=image_size, scope='inputs/task_encode')
with tf.variable_scope('inputs/task_encode'):
labels_task_encode_pl_real = tf.placeholder(tf.int64,
shape=(flags.num_tasks_per_batch * flags.num_classes_train), name='labels_real')
labels_task_encode_pl = tf.placeholder(tf.int64,
shape=(flags.num_tasks_per_batch * flags.num_classes_train),
name='labels')
#here is the word embedding layer for training
emb_path = os.path.join(flags.data_dir, 'few-shot-wordemb-{}.npz'.format("train"))
embedding_train = np.load(emb_path)["features"].astype(np.float32)
print(embedding_train.dtype)
logging.info("Loading mini-imagenet...")
W_train = tf.constant(embedding_train, name="W_train")
label_embeddings_train = tf.nn.embedding_lookup(W_train, labels_task_encode_pl_real)
# Primary task operations
logits, regularizer_logits, _, _, alpha = build_inference_graph(images_deploy_pl=images_deploy_pl,
images_task_encode_pl=images_task_encode_pl,
flags=flags, is_training=True, is_primary=True,
label_embeddings=label_embeddings_train)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=tf.one_hot(labels_deploy_pl, flags.num_classes_train)))
# Losses and optimizer
regu_losses = slim.losses.get_regularization_losses()
loss = tf.add_n([loss] + regu_losses)
misclass = 1.0 - slim.metrics.accuracy(tf.argmax(logits, 1), labels_deploy_pl)
# Learning rate
if flags.lr_anneal == 'const':
learning_rate = flags.init_learning_rate
elif flags.lr_anneal == 'pwc':
learning_rate = get_pwc_learning_rate(global_step, flags)
elif flags.lr_anneal == 'exp':
lr_decay_step = flags.number_of_steps // flags.n_lr_decay
learning_rate = tf.train.exponential_decay(flags.init_learning_rate, global_step, lr_decay_step,
1.0 / flags.lr_decay_rate, staircase=True)
else:
raise Exception('Not implemented')
# Optimizer
if flags.optimizer == 'sgd':
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
else:
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = slim.learning.create_train_op(total_loss=loss, optimizer=optimizer, global_step=global_step,
clip_gradient_norm=flags.clip_gradient_norm)
tf.summary.scalar('loss', loss)
tf.summary.scalar('misclassification', misclass)
tf.summary.scalar('learning_rate', learning_rate)
# Merge all summaries except for pretrain
summary = tf.summary.merge(tf.get_collection('summaries', scope='(?!pretrain).*'))
# Get datasets
few_shot_data_train, pretrain_data_train, pretrain_data_test = get_train_datasets(flags)
# Define session and logging
summary_writer = tf.summary.FileWriter(log_dir, flush_secs=1)
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
supervisor = tf.train.Supervisor(logdir=log_dir, init_feed_dict=None,
summary_op=None,
init_op=tf.global_variables_initializer(),
summary_writer=summary_writer,
saver=saver,
global_step=global_step, save_summaries_secs=flags.save_summaries_secs,
save_model_secs=0) # flags.save_interval_secs
with supervisor.managed_session() as sess:
checkpoint_step = sess.run(global_step)
if checkpoint_step > 0:
checkpoint_step += 1
eval_interval_steps = flags.eval_interval_steps
for step in range(checkpoint_step, flags.number_of_steps):
# get batch of data to compute classification loss
images_deploy, labels_deploy, images_task_encode, labels_task_encode_real, labels_task_encode = \
few_shot_data_train.next_few_shot_batch_wordemb(deploy_batch_size=flags.train_batch_size,
num_classes_test=flags.num_classes_train,
num_shots=flags.num_shots_train,
num_tasks=flags.num_tasks_per_batch)
if flags.augment:
images_deploy = image_augment(images_deploy)
images_task_encode = image_augment(images_task_encode)
feed_dict = {images_deploy_pl: images_deploy.astype(dtype=np.float32), labels_deploy_pl: labels_deploy,
images_task_encode_pl: images_task_encode.astype(dtype=np.float32),
labels_task_encode_pl_real: labels_task_encode_real,
labels_task_encode_pl: labels_task_encode}
t_batch = time.time()
feed_dict = create_hard_negative_batch(misclass, feed_dict, sess, few_shot_data_train, flags,
images_deploy_pl, labels_deploy_pl, images_task_encode_pl,
labels_task_encode_pl_real)
dt_batch = time.time() - t_batch
t_train = time.time()
loss,alpha_np = sess.run([train_op,alpha], feed_dict=feed_dict)
dt_train = time.time() - t_train
if step % 100 == 0:
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
logging.info("step %d, loss : %.4g, dt: %.3gs, dt_batch: %.3gs" % (step, loss, dt_train, dt_batch))
fout.write("step: "+str(step)+' loss: '+str(loss)+'\n')
if float(step) / flags.number_of_steps > 0.5:
eval_interval_steps = flags.eval_interval_fine_steps
if eval_interval_steps > 0 and step % eval_interval_steps == 0:
saver.save(sess, os.path.join(log_dir, 'model'), global_step=step)
eval(flags=flags, is_primary=True, fout=fout)
if float(step) > 0.5 * flags.number_of_steps + flags.number_of_steps_to_early_stop:
break
class ModelLoader:
def __init__(self, model_path, batch_size, is_primary, split):
self.batch_size = batch_size
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir=os.path.join(model_path, 'train'))
step = int(os.path.basename(latest_checkpoint).split('-')[1])
flags = Namespace(load_and_save_params(default_params=dict(), exp_dir=model_path))
image_size = get_image_size(flags.data_dir)
with tf.Graph().as_default():
images_deploy_pl, labels_deploy_pl = placeholder_inputs(batch_size=batch_size,
image_size=image_size, scope='inputs/deploy')
if is_primary:
task_encode_batch_size = flags.num_classes_test * flags.num_shots_test
images_task_encode_pl, _ = placeholder_inputs(batch_size=task_encode_batch_size,
image_size=image_size,
scope='inputs/task_encode')
with tf.variable_scope('inputs/task_encode'):
labels_task_encode_pl_real = tf.placeholder(tf.int64,
shape=(flags.num_classes_test), name='labels_real')
labels_task_encode_pl = tf.placeholder(tf.int64,
shape=(flags.num_classes_test),
name='labels')
self.vocab_size = tf.placeholder(tf.float32, shape=(), name='vocab_size')
self.tensor_images_deploy = images_deploy_pl
self.tensor_labels_deploy = labels_deploy_pl
self.tensor_labels_task_encode_real = labels_task_encode_pl_real
self.tensor_labels_task_encode = labels_task_encode_pl
self.tensor_images_task_encode = images_task_encode_pl
emb_path = os.path.join(flags.data_dir, 'few-shot-wordemb-{}.npz'.format(split))
embedding_train = np.load(emb_path)["features"].astype(np.float32)
print(embedding_train.dtype)
logging.info("Loading mini-imagenet...")
W = tf.constant(embedding_train, name="W_"+split)
label_embeddings_train = tf.nn.embedding_lookup(W, labels_task_encode_pl_real)
# Primary task operations
logits, regularizer_logits, features_sample, features_query, self.alpha = build_inference_graph(images_deploy_pl=images_deploy_pl,
images_task_encode_pl=images_task_encode_pl,
flags=flags, is_training=False, is_primary=True,
label_embeddings=label_embeddings_train)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=tf.one_hot(labels_deploy_pl, flags.num_classes_test)))
regularizer_loss = 0.0
# Losses and optimizer
regu_losses = slim.losses.get_regularization_losses()
loss = tf.add_n([loss] + regu_losses + [regularizer_loss])
init_fn = slim.assign_from_checkpoint_fn(
latest_checkpoint,
slim.get_model_variables('Model'))
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
# Run init before loading the weights
self.sess.run(tf.global_variables_initializer())
# Load weights
init_fn(self.sess)
self.flags = flags
self.logits = logits
self.loss = loss
self.features_sample = features_sample
self.features_query = features_query
self.logits_size = self.logits.get_shape().as_list()[-1]
self.step = step
self.is_primary = is_primary
log_dir = get_logdir_name(flags)
graphpb_txt = str(tf.get_default_graph().as_graph_def())
pathlib.Path(os.path.join(log_dir, 'eval')).mkdir(parents=True, exist_ok=True)
with open(os.path.join(log_dir, 'eval', 'graph.pbtxt'), 'w') as f:
f.write(graphpb_txt)
def eval(self, data_dir, num_cases_test, split='target_val'):
data_set = Dataset(_load_mini_imagenet(data_dir=data_dir, split=split))
num_batches = num_cases_test // self.batch_size
num_correct = 0.0
num_tot = 0.0
loss_tot = 0.0
final_alpha=[]
for i in range(num_batches):
num_classes, num_shots = self.flags.num_classes_test, self.flags.num_shots_test
images_deploy, labels_deploy, images_task_encode, labels_task_encode_real, labels_task_encode = \
data_set.next_few_shot_batch_wordemb(deploy_batch_size=self.batch_size,
num_classes_test=num_classes, num_shots=num_shots,
num_tasks=1)
feed_dict = {self.tensor_images_deploy: images_deploy.astype(dtype=np.float32),
self.tensor_labels_task_encode_real: labels_task_encode_real,
self.tensor_labels_deploy: labels_deploy,
self.tensor_labels_task_encode: labels_task_encode,
self.tensor_images_task_encode: images_task_encode.astype(dtype=np.float32)}
[logits, loss, alpha] = self.sess.run([self.logits, self.loss, self.alpha], feed_dict)
final_alpha.append(alpha)
labels_deploy_pred = np.argmax(logits, axis=-1)
"""
Transformer
=================================
This example shows how to implement the Transformer model with Gluon NLP Toolkit.
@inproceedings{vaswani2017attention,
title={Attention is all you need},
author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones,
Llion and Gomez, Aidan N and Kaiser, Lukasz and Polosukhin, Illia},
booktitle={Advances in Neural Information Processing Systems},
pages={6000--6010},
year={2017}
}
"""
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name,logging-format-interpolation
import argparse
import time
import random
import os
import io
import logging
import math
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet.gluon.data import ArrayDataset, SimpleDataset
from mxnet.gluon.data import DataLoader
import gluonnlp.data.batchify as btf
from gluonnlp.data import NLTKMosesDetokenizer
from gluonnlp.data import ConstWidthBucket, LinearWidthBucket, ExpWidthBucket,\
FixedBucketSampler, IWSLT2015, WMT2016, WMT2016BPE, WMT2014, WMT2014BPE
from gluonnlp.model import BeamSearchScorer
from translation import NMTModel, BeamSearchTranslator
from transformer import get_transformer_encoder_decoder
from loss import SoftmaxCEMaskedLoss, LabelSmoothing
from utils import logging_config
from bleu import _bpe_to_words, compute_bleu
import _constants as _C
np.random.seed(100)
random.seed(100)
mx.random.seed(10000)
parser = argparse.ArgumentParser(description='Neural Machine Translation Example.'
'We train the Transformer Model')
parser.add_argument('--dataset', type=str, default='WMT2016BPE', help='Dataset to use.')
parser.add_argument('--src_lang', type=str, default='en', help='Source language')
parser.add_argument('--tgt_lang', type=str, default='de', help='Target language')
parser.add_argument('--epochs', type=int, default=10, help='upper epoch limit')
parser.add_argument('--num_units', type=int, default=512, help='Dimension of the embedding '
'vectors and states.')
parser.add_argument('--hidden_size', type=int, default=2048,
help='Dimension of the hidden state in position-wise feed-forward networks.')
parser.add_argument('--dropout', type=float, default=0.1,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--epsilon', type=float, default=0.1,
help='epsilon parameter for label smoothing')
parser.add_argument('--num_layers', type=int, default=6,
help='number of layers in the encoder and decoder')
parser.add_argument('--num_heads', type=int, default=8,
help='number of heads in multi-head attention')
parser.add_argument('--scaled', action='store_true', help='Turn on to use scale in attention')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size. Number of tokens in a minibatch')
parser.add_argument('--beam_size', type=int, default=4, help='Beam size')
parser.add_argument('--lp_alpha', type=float, default=0.6,
help='Alpha used in calculating the length penalty')
parser.add_argument('--lp_k', type=int, default=5, help='K used in calculating the length penalty')
parser.add_argument('--test_batch_size', type=int, default=256, help='Test batch size')
parser.add_argument('--num_buckets', type=int, default=10, help='Bucket number')
parser.add_argument('--bucket_scheme', type=str, default='constant',
help='Strategy for generating bucket keys. It supports: '
'"constant": all the buckets have the same width; '
'"linear": the width of bucket increases linearly; '
'"exp": the width of bucket increases exponentially')
parser.add_argument('--bucket_ratio', type=float, default=0.0, help='Ratio for increasing the '
'throughput of the bucketing')
parser.add_argument('--src_max_len', type=int, default=-1, help='Maximum length of the source '
'sentence, -1 means no clipping')
parser.add_argument('--tgt_max_len', type=int, default=-1, help='Maximum length of the target '
'sentence, -1 means no clipping')
parser.add_argument('--optimizer', type=str, default='adam', help='optimization algorithm')
parser.add_argument('--lr', type=float, default=1.0, help='Initial learning rate')
parser.add_argument('--warmup_steps', type=float, default=4000,
help='number of warmup steps used in NOAM\'s stepsize schedule')
parser.add_argument('--num_accumulated', type=int, default=1,
help='Number of steps to accumulate the gradients. '
'This is useful to mimic large batch training with limited gpu memory')
parser.add_argument('--magnitude', type=float, default=3.0,
help='Magnitude of Xavier initialization')
parser.add_argument('--average_checkpoint', action='store_true',
help='Turn on to perform final testing based on '
'the average of last few checkpoints')
parser.add_argument('--num_averages', type=int, default=5,
help='Perform final testing based on the '
'average of last num_averages checkpoints. '
'This is only used if average_checkpoint is True')
parser.add_argument('--average_start', type=int, default=5,
help='Perform average SGD on last average_start epochs')
parser.add_argument('--full', action='store_true',
help='In default, we use the test dataset in'
' http://statmt.org/wmt14/test-filtered.tgz.'
' When the option full is turned on, we use the test dataset in'
' http://statmt.org/wmt14/test-full.tgz')
parser.add_argument('--bleu', type=str, default='tweaked',
help='Schemes for computing bleu score. It can be: '
'"tweaked": it uses similar steps in get_ende_bleu.sh in tensor2tensor '
'repository, where compound words are put in ATAT format; '
'"13a": This uses official WMT tokenization and produces the same results'
' as official script (mteval-v13a.pl) used by WMT; '
'"intl": This use international tokenization in mteval-v14a.pl')
parser.add_argument('--log_interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--save_dir', type=str, default='transformer_out',
help='directory path to save the final model and training log')
parser.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu.'
'(using single gpu is suggested)')
args = parser.parse_args()
logging_config(args.save_dir)
logging.info(args)
def cache_dataset(dataset, prefix):
"""Cache the processed npy dataset the dataset into a npz
Parameters
----------
dataset : SimpleDataset
file_path : str
"""
if not os.path.exists(_C.CACHE_PATH):
os.makedirs(_C.CACHE_PATH)
src_data = np.array([ele[0] for ele in dataset])
tgt_data = np.array([ele[1] for ele in dataset])
np.savez(os.path.join(_C.CACHE_PATH, prefix + '.npz'), src_data=src_data, tgt_data=tgt_data)
def load_cached_dataset(prefix):
cached_file_path = os.path.join(_C.CACHE_PATH, prefix + '.npz')
if os.path.exists(cached_file_path):
print('Load cached data from {}'.format(cached_file_path))
dat = np.load(cached_file_path)
return ArrayDataset(np.array(dat['src_data']), np.array(dat['tgt_data']))
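# Hedged usage sketch of the caching pair above ('demo' prefix illustrative):
#
#   cache_dataset(dataset, 'demo')          # writes _C.CACHE_PATH/demo.npz
#   cached = load_cached_dataset('demo')    # ArrayDataset on hit, None on miss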
# Subgrid refinement and error estimation of redshift value found by
# redmonster.physics.zfinder.py .
# Interpolates both between redshift pixel lags and between model parameters.
#
# <NAME>, University of Utah @ IAC, May 2014
# Significant updates to z_refine() -> z_refine2() by TH, July 2015
# <EMAIL>
import numpy as n
import matplotlib as m
from matplotlib import pyplot as p
m.interactive(True)
from redmonster.physics.misc import quadfit
from redmonster.physics import grid_spline as gs
class ZFitter:
def __init__(self, zchi2, zbase):
self.zchi2 = zchi2
self.zbase = zbase
self.z = n.zeros((zchi2.shape[0],5))
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy.signal
import GPy
from sklearn.decomposition import PCA
from hdbscan import HDBSCAN
from tqdm import tqdm
import numpy.matlib
def fourier_transform(waveform, num_harmonics, len_spw):
step=np.arange(0,len_spw,1)
a=[]
b=[]
for order in range(1, num_harmonics + 1):
w_cos = waveform * np.cos(order * 2 * np.pi / len_spw * step)
w_sin = waveform * np.sin(order * 2 * np.pi / len_spw * step)
a.append((2/len_spw)*w_cos.sum())
b.append((2/len_spw)*w_sin.sum())
return [np.array(a),np.array(b)]
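# Hedged reconstruction check for the coefficients above (toy waveform): a
# zero-mean signal is recovered as sum_k a_k*cos(k*2*pi*t/N) + b_k*sin(...).
#
#   N = 64
#   t = np.arange(N)
#   wave = np.sin(2 * np.pi * 3 * t / N)        # pure 3rd harmonic
#   a, b = fourier_transform(wave, N // 2, N)
#   recon = sum(a[k-1] * np.cos(k * 2 * np.pi * t / N) +
#               b[k-1] * np.sin(k * 2 * np.pi * t / N)
#               for k in range(1, N // 2 + 1))
#   np.allclose(wave, recon)                    # True for this zero-mean toy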
def delt(base,target):
delt = target - base
if np.abs(delt) > np.pi:
if base >= 0:
delt = 2*np.pi + delt
else:
delt = delt - 2*np.pi
return delt
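# Hedged examples of the wraparound logic above: delt returns the shortest
# signed phase difference, staying within (-pi, pi]:
#
#   delt(3.0, -3.0)    # ->  2*pi - 6.0 ~=  0.283, not -6.0
#   delt(-3.0, 3.0)    # ->  6.0 - 2*pi ~= -0.283, not  6.0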
def sampling_timing(init_site, width_extended = 100, width_detected = 32, step_width = 0.01, len_wave = 20):
shift_quantity = np.round(0.01*np.arange(-99,100),2)
time_coordinate = np.arange(0,len_wave,step_width) + init_site - width_detected + shift_quantity.reshape(-1,1)#x
time_coordinate = np.round(100*time_coordinate,0).astype('int')
integer_idx = np.where(time_coordinate%100==0,True, False)
temp_idx = np.where(integer_idx==True)[1].reshape(199,len_wave)
samp_idx = time_coordinate[integer_idx].reshape(integer_idx.shape[0],len_wave).copy()
samp_idx = np.round(samp_idx/100,0).astype('int')
return samp_idx, temp_idx
class SpikeSorting:
def __init__(self):
self.progress_ = 0
def guidance(self,module_num):
message = ["load_h5()", "highpass_filter()", "spike_detection()", "dataset_division()",
"spectrum_extraction()", "spw_classification()", "template_reconstruction()",
"template_matching()","mpw_classification()","data_summarization()"]
if module_num - self.progress_ > 1:
print("Please run "+message[self.progress_])
return False
else:
return True
def load_h5(self, h5_name):
h5_data = h5py.File(h5_name)
a_group_key = list(h5_data.keys())[0]
self.raw_data_ = h5_data[a_group_key][:,0]
self.progress_ = 1
def highpass_filter(self, bottom=300, sampling_rate=24000):
if self.guidance(2):
nyq = sampling_rate / 2
cutoff = np.array([bottom]) / nyq
numtaps = 255
bpf = scipy.signal.firwin(numtaps, cutoff, pass_zero=False)
self.filtered_data_ = scipy.signal.lfilter(bpf, 1, self.raw_data_)[int((numtaps-1)/2):]
self.progress_ = 2
def spike_detection(self, f_std=3.5):
if self.guidance(3):
wide = 36
self.thr_ = f_std*np.median(np.abs(self.filtered_data_)/0.6745)
peak_idx = scipy.signal.argrelmax(self.filtered_data_,order=5)[0]
peak_idx = peak_idx[np.where(self.filtered_data_[peak_idx]>self.thr_)[0]]
self.detected_waveform_ = [self.filtered_data_[i-wide:i+wide] for i in peak_idx]
self.detected_waveform_ = np.array(self.detected_waveform_).reshape(peak_idx.shape[0],wide*2)
self.peak_idx_ =peak_idx
self.progress_ = 3
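# Note on thr_ above: median(|x|) / 0.6745 is a robust estimator of the noise
# standard deviation under Gaussian noise (0.6745 is the 75th percentile of
# the standard normal), so the threshold is roughly f_std * sigma_noise:
#
#   sigma_est = np.median(np.abs(filtered)) / 0.6745   # 'filtered' illustrative
#   thr = 3.5 * sigma_est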
def dataset_division(self):
if self.guidance(4):
single_peak_waveform = []; multi_peak_waveform = []; sw_idx = []; mw_idx = []
for i in range(self.detected_waveform_.shape[0]):
peak_idx = scipy.signal.argrelmax(self.detected_waveform_[i,:])[0]
ith_waveform = self.detected_waveform_[i,:].copy()
if len(np.where(ith_waveform[peak_idx]>self.thr_)[0])==1:
single_peak_waveform.append(ith_waveform[4:68].copy())
sw_idx.append(i)
else:
multi_peak_waveform.append(ith_waveform[4:68].copy())
mw_idx.append(i)
self.sw_idx_ = np.array(sw_idx)
self.mw_idx_ = np.array(mw_idx)
self.single_peak_waveform_ = np.array(single_peak_waveform).reshape(self.sw_idx_.shape[0],64)
self.multi_peak_waveform_ = np.array(multi_peak_waveform).reshape(self.mw_idx_.shape[0],64)
self.progress_ = 4
def spectrum_extraction(self, diff_order):
if self.guidance(5):
num_spw = self.single_peak_waveform_.shape[0]
len_spw = self.single_peak_waveform_.shape[1]
self.num_spw_ = num_spw
self.len_spw_ = len_spw
num_harmonics = int(self.single_peak_waveform_.shape[1]/2)
self.order_diff_ = diff_order
self.padded_diff_waveform = np.zeros((num_spw,len_spw))
diff_waveform = np.diff(self.single_peak_waveform_, diff_order).copy()*scipy.signal.hamming(len_spw-diff_order)
self.padded_diff_waveform[:,0:len_spw-diff_order] = np.copy(diff_waveform.T - diff_waveform.mean(1)).T
self.fourier_coeff_a = np.zeros((num_spw, num_harmonics))
self.fourier_coeff_b = np.zeros((num_spw, num_harmonics))
self.phase_ = np.zeros((num_spw,num_harmonics))
self.amplitude_ = np.zeros((num_spw,num_harmonics))
for i in range(num_spw):
self.fourier_coeff_a[i,:], self.fourier_coeff_b[i,:] = fourier_transform(self.padded_diff_waveform[i,:].copy(),num_harmonics, len_spw)
self.phase_[i,:] = np.arctan2(self.fourier_coeff_b[i,:], self.fourier_coeff_a[i,:])
self.amplitude_[i,:] = np.sqrt(self.fourier_coeff_a[i,:]**2 + self.fourier_coeff_b[i,:]**2)
self.progress_ = 5
import numpy as np
import timeit
NUM_RUNS = 10
def mat_trig_upper_dot(mat, v, y, n):
for i in range(n-1,-1,-1):
for j in range(n-1, i-1, -1):
y[i] += mat[i][j] * v[j]
return y
def mat_trig_upper_axpy(mat, v, y, n):
for j in range(n-1,-1,-1):
for i in range(j+1):
y[i] += mat[i][j] * v[j]
return y
def mat_trig_lower_dot(mat, v, y, n):
for i in range(n):
for j in range(i+1):
y[i] += mat[i][j] * v[j]
return y
def mat_trig_lower_axpy(mat, v, y, n):
for j in range(n):
for i in range(n-1, j-1, -1):
y[i] += mat[i][j] * v[j]
return y
def mat_sym_trig_upper_dot(mat, v, y, n):
for i in range(n):
for j in range(n):
if i > j:
y[i] += mat[j][i] * v[j]
else:
y[i] += mat[i][j] * v[j]
return y
def mat_vector_dot(mat, v, y, n):
for i in range(n):
for j in range(n):
y[i] += mat[i][j] * v[j]
return y
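# Why the triangular variants above can win (illustrative operation counts):
# a dense n x n matvec performs n*n multiply-adds, while the triangular loops
# touch only the n*(n+1)/2 stored entries -- about half the work for large n.
#
#   n = 3000
#   full_ops = n * n               # 9_000_000
#   tri_ops = n * (n + 1) // 2     # 4_501_500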
def assertion(func, mat, v, n):
y = np.zeros(n, dtype=int)
# numpy
dot = np.dot(mat, v)
# mine
my_dot = func(mat, v, y, n)
return np.array_equal(dot, my_dot)
def make_tests():
mat_trig_upper = np.array([
[1,2,3],
[0,4,5],
[0,0,6]
])
mat_trig_lower = np.array([
[1,0,0],
[2,3,0],
[4,5,6]
])
mat_sym = np.array([
[1,2,3],
[2,5,6],
[3,6,7]
])
mat = np.array([
[1,2,3],
[4,5,6],
[7,8,9]
])
v = np.array([1,0,1])
n = len(v)
# Tests
assert assertion(mat_trig_upper_dot,mat_trig_upper,v,n)
print("Upper Triangular Matrix Dot - [!] PASSED")
assert assertion(mat_trig_lower_dot,mat_trig_lower,v,n)
print("Lower Triangular Matrix Dot - [!] PASSED")
assert assertion(mat_sym_trig_upper_dot,mat_sym,v,n)
print("Symmetric Matrix Dot - [!] PASSED")
assert assertion(mat_vector_dot,mat,v,n)
print("Matrix-Vector Dot - [!] PASSED\n\n")
def benchmark(func, msg=None, *args):
duration = timeit.Timer(lambda: func(*args)).timeit(number = NUM_RUNS)
avg_duration = duration/NUM_RUNS
if msg:
print(f'[+] {msg}: Took approximately {avg_duration} seconds')
if "__main__" == __name__:
make_tests()
n = 3000
v = np.random.randint(low=1, high=5, size=n, dtype=int)
mat = np.random.randint(low=1, high=10, size=(n, n), dtype=int)
mat_trig_upper = np.triu(mat)
mat_trig_lower = np.tril(mat)
#mat_sym = np.tril(mat) + np.tril(mat, -1).T
print("### Dot: Matriz Triangular Superior ###")
benchmark(np.dot, "Numpy func", mat_trig_upper, v)
y = np.zeros(n, dtype=int)
benchmark(mat_vector_dot, "Naive func", mat_trig_upper, v, y, n)
y = np.zeros(n, dtype=int)
benchmark(mat_trig_upper_dot, "Efficient func", mat_trig_upper, v, y, n)
print("### Dot: Matriz Triangular Inferior ###")
benchmark(np.dot, "Numpy func", mat_trig_lower, v)
y = np.zeros(n, dtype=int)
benchmark(mat_vector_dot, "Naive func", mat_trig_lower, v, y, n)
y = np.zeros(n, dtype=int)
benchmark(mat_trig_lower_dot, "Efficient func", mat_trig_lower, v, y, n)
print("### AxPy: Matriz Triangular Superior ###")
benchmark(np.dot, "Numpy func", mat_trig_upper, v)
y = np.zeros(n, dtype=int)
benchmark(mat_vector_dot, "Naive func", mat_trig_upper, v, y, n)
y = np.zeros(n, dtype=int)
benchmark(mat_trig_upper_axpy, "Efficient func", mat_trig_upper, v, y, n)
print("### AxPy: Matriz Triangular Inferior ###")
benchmark(np.dot, "Numpy func", mat_trig_lower, v)
y = np.zeros(n, dtype=int)
benchmark(mat_vector_dot, "Naive func", mat_trig_lower, v, y, n)
y = np.zeros(n, dtype=int)
benchmark(mat_trig_lower_axpy, "Efficient func", mat_trig_lower, v, y, n)
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
def test_piecewise_linear_schedule():
s = canopy.schedules.PiecewiseLinearSchedule([(2, 10),
(4, 15),
(7, -2)])
ans = np.array([10,
10,
12.5,
15,
(2 * 15 + -2) / 3,
(15 + 2 * -2) / 3,
-2,
-2,
-2,
-2])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_discrete_schedule():
s = canopy.schedules.DiscreteSchedule([(2, 10),
(4, 15),
-2])
ans = np.array([10, 10, 15, 15, -2, -2, -2, -2, -2, -2])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
s = canopy.schedules.DiscreteSchedule([-2])
ans = np.array([-2, -2, -2, -2, -2])
res = np.array([s(None, None) for _ in range(5)])
np.testing.assert_allclose(ans, res)
def test_step_schedule():
s = canopy.schedules.StepSchedule(1, 2, [3, 5, 9])
ans = np.array([1, 1, 2, 2, 4, 4, 4, 4, 8, 8])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_recurring_step_schedule():
s = canopy.schedules.RecurringStepSchedule(1, 2, 3)
ans = np.array([1, 1, 2, 2, 2, 4, 4, 4, 8, 8])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_inverse_decay_schedule():
s = canopy.schedules.InverseDecaySchedule(1, 0.1, -2)
ans = np.array([1, 1.1 ** 2, 1.2 ** 2, 1.3 ** 2, 1.4 ** 2])
res = np.array([s(None, None) for _ in range(5)])
np.testing.assert_allclose(ans, res)
def test_fixed_schedule():
s = canopy.schedules.FixedSchedule(42)
ans = np.array([42] * 10)
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_exponential_schedule():
s = canopy.schedules.ExponentialSchedule(2.3, 0.7)
ans = 2.3 * 0.7 ** np.arange(10)
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_half_life_schedule():
s = canopy.schedules.HalfLifeSchedule(1, 2)
ans = np.array([1, np.sqrt(0.5), 0.5, np.sqrt(0.125), 0.25])
res = np.array([s(None, None) for _ in range(5)])
np.testing.assert_allclose(ans, res)
def test_multi_stage_schedule():
s = canopy.schedules.MultiStageSchedule(
[(2, canopy.schedules.FixedSchedule(2)),
(5, canopy.schedules.ExponentialSchedule(3, 2)),
canopy.schedules.FixedSchedule(1)])
ans = np.array([2, 2, 3, 6, 12, 1, 1, 1, 1, 1])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
def test_piecewise_log_linear_schedule():
s = canopy.schedules.PiecewiseLogLinearSchedule([(2, 1.0),
(5, 1e-3)])
ans = np.array([1, 1, 1e-1, 1e-2, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3])
res = np.array([s(None, None) for _ in range(10)])
np.testing.assert_allclose(ans, res)
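# Hedged check of the log-linear interpolation above: between (2, 1.0) and
# (5, 1e-3) the schedule is linear in log-space, i.e. one decade per step:
#
#   np.logspace(0, -3, 4)   # -> array([1.e+00, 1.e-01, 1.e-02, 1.e-03])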
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package implements the BiGraph structure for handling bigraph data.
"""
import os
import json
import paddle
import copy
import numpy as np
from pgl.utils import op
import pgl.graph_kernel as graph_kernel
from pgl.message import Message
from collections import defaultdict
from pgl.utils.helper import check_is_tensor, scatter, generate_segment_id_from_index, maybe_num_nodes, unique_segment
from pgl.utils.edge_index import EdgeIndex
import paddle.distributed as dist
import warnings
class BiGraph(object):
"""Implementation of bigraph interface in pgl.
This is a simple implementation of graph structure in pgl.
`pgl.BiGraph` is an alias for `pgl.graph.BiGraph`
Args:
        edges: list of (u, v) tuples, 2D numpy.ndarray or 2D paddle.Tensor
        src_num_nodes (optional: int, numpy or paddle.Tensor): Number of src nodes in a bigraph.
            If not provided, the number of src nodes will be inferred from edges[:, 0].
        dst_num_nodes (optional: int, numpy or paddle.Tensor): Number of dst nodes in a bigraph.
            If not provided, the number of dst nodes will be inferred from edges[:, 1].
src_node_feat (optional): a dict of numpy array as src node features
dst_node_feat (optional): a dict of numpy array as dst node features
edge_feat (optional): a dict of numpy array as edge features (should
have consistent order with edges)
Examples 1:
- Create a graph with numpy.
- Convert it into paddle.Tensor .
- Do send recv for graph neural network.
.. code-block:: python
import numpy as np
import pgl
src_num_nodes = 4
dst_num_nodes = 5
edges = [ (0, 1), (1, 2), (3, 4)]
src_feat = np.random.randn(4, 100).astype(np.float32)
dst_feat = np.random.randn(5, 100).astype(np.float32)
edge_feature = np.random.randn(3, 100).astype(np.float32)
graph = pgl.BiGraph(
src_num_nodes=src_num_nodes,
dst_num_nodes=dst_num_nodes,
edges=edges,
src_node_feat={
"src_feat": src_feat
},
dst_node_feat={
"dst_feat": dst_feat
},
edge_feat={
"edge_feature": edge_feature
})
graph.tensor()
Examples 2:
- Create a graph with paddle.Tensor.
- Do send recv for graph neural network.
.. code-block:: python
import numpy as np
import pgl
src_num_nodes = 4
dst_num_nodes = 5
edges = paddle.to_tensor([ (0, 1), (1, 2), (3, 4)])
src_feat = np.random.randn(4, 100).astype(np.float32)
src_feat = paddle.to_tensor(src_feat)
dst_feat = np.random.randn(5, 100).astype(np.float32)
dst_feat = paddle.to_tensor(dst_feat)
edge_feature = np.random.randn(3, 100).astype(np.float32)
edge_feature = paddle.to_tensor(edge_feature)
graph = pgl.BiGraph(
src_num_nodes=src_num_nodes,
dst_num_nodes=dst_num_nodes,
edges=edges,
src_node_feat={
"src_feat": src_feat
},
dst_node_feat={
"dst_feat": dst_feat
},
edge_feat={
"edge_feature": edge_feature
})
"""
def __init__(self,
edges,
src_num_nodes=None,
dst_num_nodes=None,
src_node_feat=None,
dst_node_feat=None,
edge_feat=None,
**kwargs):
if src_node_feat is not None:
self._src_node_feat = src_node_feat
else:
self._src_node_feat = {}
if dst_node_feat is not None:
self._dst_node_feat = dst_node_feat
else:
self._dst_node_feat = {}
if edge_feat is not None:
self._edge_feat = edge_feat
else:
self._edge_feat = {}
if not check_is_tensor(edges):
if isinstance(edges, np.ndarray):
if edges.dtype != "int64":
edges = edges.astype("int64")
edges = np.array(edges, dtype="int64")
self._edges = edges
if src_num_nodes is None:
self._src_num_nodes = maybe_num_nodes(self._edges[:, 0])
else:
self._src_num_nodes = src_num_nodes
max_edge_id = maybe_num_nodes(self._edges[:, 0])
if self._src_num_nodes < max_edge_id:
raise ValueError("The max src edge ID should be less than the number of src nodes. "
"But got max src edge ID [%s] >= src_num_nodes [%s]" \
% (max_edge_id-1, self._src_num_nodes))
if dst_num_nodes is None:
self._dst_num_nodes = maybe_num_nodes(self._edges[:, 1])
else:
self._dst_num_nodes = dst_num_nodes
max_edge_id = maybe_num_nodes(self._edges[:, 1])
if self._dst_num_nodes < max_edge_id:
raise ValueError("The max dst edge ID should be less than the number of dst nodes. "
"But got max dst edge ID [%s] >= dst_num_nodes [%s]" \
% (max_edge_id-1, self._dst_num_nodes))
self._adj_src_index = kwargs.get("adj_src_index", None)
self._adj_dst_index = kwargs.get("adj_dst_index", None)
if check_is_tensor(self._src_num_nodes, self._dst_num_nodes,
self._edges, *list(self._src_node_feat.values()),
*list(self._dst_node_feat.values()),
*list(self._edge_feat.values())):
self._is_tensor = True
elif self._adj_src_index is not None and self._adj_src_index.is_tensor(
):
self._is_tensor = True
elif self._adj_dst_index is not None and self._adj_dst_index.is_tensor(
):
self._is_tensor = True
else:
self._is_tensor = False
if self._is_tensor:
            # ensure all variables are tensors
if not check_is_tensor(self._src_num_nodes):
self._src_num_nodes = paddle.to_tensor(self._src_num_nodes)
if not check_is_tensor(self._dst_num_nodes):
self._dst_num_nodes = paddle.to_tensor(self._dst_num_nodes)
if not check_is_tensor(self._edges):
self._edges = paddle.to_tensor(self._edges)
for key in self._src_node_feat:
if not check_is_tensor(self._src_node_feat[key]):
self._src_node_feat[key] = paddle.to_tensor(
self._src_node_feat[key])
for key in self._dst_node_feat:
if not check_is_tensor(self._dst_node_feat[key]):
self._dst_node_feat[key] = paddle.to_tensor(
self._dst_node_feat[key])
for key in self._edge_feat:
if not check_is_tensor(self._edge_feat[key]):
self._edge_feat[key] = paddle.to_tensor(self._edge_feat[
key])
if self._adj_src_index is not None:
if not self._adj_src_index.is_tensor():
self._adj_src_index.tensor(inplace=True)
if self._adj_dst_index is not None:
if not self._adj_dst_index.is_tensor():
self._adj_dst_index.tensor(inplace=True)
# preprocess graph level informations
self._process_graph_info(**kwargs)
self._src_nodes = None
self._dst_nodes = None
def __repr__(self):
"""Pretty Print the BiGraph
"""
repr_dict = {"class": self.__class__.__name__}
if self._is_tensor:
src_num_nodes = self.src_num_nodes.numpy()
dst_num_nodes = self.dst_num_nodes.numpy()
else:
src_num_nodes = self.src_num_nodes
            dst_num_nodes = self.dst_num_nodes
repr_dict["src_num_nodes"] = int(src_num_nodes)
repr_dict["dst_num_nodes"] = int(dst_num_nodes)
repr_dict["edges_shape"] = self.edges.shape
repr_dict["src_node_feat"] = []
for key, value in self.src_node_feat.items():
repr_dict["src_node_feat"].append({
"name": key,
"shape": list(value.shape),
"dtype": str(value.dtype)
})
repr_dict["dst_node_feat"] = []
for key, value in self.dst_node_feat.items():
repr_dict["dst_node_feat"].append({
"name": key,
"shape": list(value.shape),
"dtype": str(value.dtype)
})
repr_dict["edge_feat"] = []
for key, value in self.edge_feat.items():
repr_dict["edge_feat"].append({
"name": key,
"shape": list(value.shape),
"dtype": str(value.dtype)
})
return json.dumps(repr_dict, ensure_ascii=False)
@classmethod
def load(cls, path, mmap_mode="r"):
"""Load BiGraph from path and return a BiGraph in numpy.
Args:
path: The directory path of the stored BiGraph.
mmap_mode: Default :code:`mmap_mode="r"`. If not None, memory-map the graph.
"""
src_num_nodes = np.load(
os.path.join(path, 'src_num_nodes.npy'), mmap_mode=mmap_mode)
dst_num_nodes = np.load(
os.path.join(path, 'dst_num_nodes.npy'), mmap_mode=mmap_mode)
edges = np.load(os.path.join(path, 'edges.npy'), mmap_mode=mmap_mode)
num_graph = np.load(
os.path.join(path, 'num_graph.npy'), mmap_mode=mmap_mode)
if os.path.exists(os.path.join(path, 'graph_src_node_index.npy')):
graph_src_node_index = np.load(
os.path.join(path, 'graph_src_node_index.npy'),
mmap_mode=mmap_mode)
else:
graph_src_node_index = None
if os.path.exists(os.path.join(path, 'graph_dst_node_index.npy')):
graph_dst_node_index = np.load(
os.path.join(path, 'graph_dst_node_index.npy'),
mmap_mode=mmap_mode)
else:
graph_dst_node_index = None
if os.path.exists(os.path.join(path, 'graph_edge_index.npy')):
graph_edge_index = np.load(
os.path.join(path, 'graph_edge_index.npy'),
mmap_mode=mmap_mode)
else:
graph_edge_index = None
if os.path.isdir(os.path.join(path, 'adj_src')):
adj_src_index = EdgeIndex.load(
os.path.join(path, 'adj_src'), mmap_mode=mmap_mode)
else:
adj_src_index = None
if os.path.isdir(os.path.join(path, 'adj_dst')):
adj_dst_index = EdgeIndex.load(
os.path.join(path, 'adj_dst'), mmap_mode=mmap_mode)
else:
adj_dst_index = None
def _load_feat(feat_path):
"""Load features from .npy file.
"""
feat = {}
if os.path.isdir(feat_path):
for feat_name in os.listdir(feat_path):
feat[os.path.splitext(feat_name)[0]] = np.load(
os.path.join(feat_path, feat_name),
mmap_mode=mmap_mode)
return feat
src_node_feat = _load_feat(os.path.join(path, 'src_node_feat'))
dst_node_feat = _load_feat(os.path.join(path, 'dst_node_feat'))
edge_feat = _load_feat(os.path.join(path, 'edge_feat'))
return cls(edges=edges,
src_num_nodes=src_num_nodes,
dst_num_nodes=dst_num_nodes,
src_node_feat=src_node_feat,
dst_node_feat=dst_node_feat,
edge_feat=edge_feat,
adj_src_index=adj_src_index,
adj_dst_index=adj_dst_index,
_num_graph=num_graph,
_graph_src_node_index=graph_src_node_index,
_graph_dst_node_index=graph_dst_node_index,
_graph_edge_index=graph_edge_index)
def is_tensor(self):
"""Return whether the BiGraph is in paddle.Tensor or numpy format.
"""
return self._is_tensor
def _apply_to_tensor(self, key, value, inplace=True):
if value is None:
return value
if key == '_is_tensor':
# set is_tensor to True
return True
if isinstance(value, EdgeIndex):
value = value.tensor(inplace=inplace)
elif isinstance(value, dict):
if inplace:
for k, v in value.items():
value[k] = paddle.to_tensor(v)
else:
new_value = {}
for k, v in value.items():
new_value[k] = paddle.to_tensor(v)
value = new_value
else:
value = paddle.to_tensor(value)
return value
def tensor(self, inplace=True):
"""Convert the BiGraph into paddle.Tensor format.
        In this format, the bigraph edges and node features are stored as paddle.Tensor.
You can use send and recv in paddle.Tensor bigraph.
Args:
inplace: (Default True) Whether to convert the bigraph into tensor inplace.
"""
if self._is_tensor:
return self
if inplace:
for key in self.__dict__:
self.__dict__[key] = self._apply_to_tensor(
key, self.__dict__[key], inplace)
return self
else:
new_dict = {}
for key in self.__dict__:
new_dict[key] = self._apply_to_tensor(key, self.__dict__[key],
inplace)
graph = self.__class__(
src_num_nodes=new_dict["_src_num_nodes"],
dst_num_nodes=new_dict["_dst_num_nodes"],
edges=new_dict["_edges"],
src_node_feat=new_dict["_src_node_feat"],
dst_node_feat=new_dict["_dst_node_feat"],
edge_feat=new_dict["_edge_feat"],
adj_src_index=new_dict["_adj_src_index"],
adj_dst_index=new_dict["_adj_dst_index"],
**new_dict)
return graph
def _apply_to_numpy(self, key, value, inplace=True):
if value is None:
return value
if key == '_is_tensor':
            # set is_tensor to False
return False
if isinstance(value, EdgeIndex):
value = value.numpy(inplace=inplace)
elif isinstance(value, dict):
if inplace:
for k, v in value.items():
value[k] = v.numpy()
else:
new_value = {}
for k, v in value.items():
new_value[k] = v.numpy()
value = new_value
else:
value = value.numpy()
return value
def numpy(self, inplace=True):
"""Convert the BiGraph into numpy format.
In numpy format, the bigraph edges and node features are in numpy.ndarray format.
But you can't use send and recv in numpy bigraph.
Args:
inplace: (Default True) Whether to convert the bigraph into numpy inplace.
"""
if not self._is_tensor:
return self
if inplace:
for key in self.__dict__:
self.__dict__[key] = self._apply_to_numpy(
key, self.__dict__[key], inplace)
return self
else:
new_dict = {}
for key in self.__dict__:
new_dict[key] = self._apply_to_numpy(key, self.__dict__[key],
inplace)
graph = self.__class__(
src_num_nodes=new_dict["_src_num_nodes"],
dst_num_nodes=new_dict["_dst_num_nodes"],
edges=new_dict["_edges"],
src_node_feat=new_dict["_src_node_feat"],
dst_node_feat=new_dict["_dst_node_feat"],
edge_feat=new_dict["_edge_feat"],
adj_src_index=new_dict["_adj_src_index"],
adj_dst_index=new_dict["_adj_dst_index"],
**new_dict)
return graph
def dump(self, path):
"""Dump the bigraph into a directory.
This function will dump the bigraph information into the given directory path.
The bigraph can be read back with :code:`pgl.BiGraph.load`
Args:
path: The directory for the storage of the bigraph.
"""
if self._is_tensor:
# Convert back into numpy and dump.
graph = self.numpy(inplace=False)
graph.dump(path)
else:
if not os.path.exists(path):
os.makedirs(path)
np.save(
os.path.join(path, 'src_num_nodes.npy'), self._src_num_nodes)
np.save(
os.path.join(path, 'dst_num_nodes.npy'), self._dst_num_nodes)
np.save(os.path.join(path, 'edges.npy'), self._edges)
np.save(os.path.join(path, 'num_graph.npy'), self._num_graph)
if self._adj_src_index is not None:
self._adj_src_index.dump(os.path.join(path, 'adj_src'))
if self._adj_dst_index is not None:
self._adj_dst_index.dump(os.path.join(path, 'adj_dst'))
if self._graph_src_node_index is not None:
np.save(
os.path.join(path, 'graph_src_node_index.npy'),
self._graph_src_node_index)
if self._graph_dst_node_index is not None:
np.save(
os.path.join(path, 'graph_dst_node_index.npy'),
self._graph_dst_node_index)
if self._graph_edge_index is not None:
np.save(
os.path.join(path, 'graph_edge_index.npy'),
self._graph_edge_index)
def _dump_feat(feat_path, feat):
"""Dump all features to .npy file.
"""
if len(feat) == 0:
return
if not os.path.exists(feat_path):
os.makedirs(feat_path)
for key in feat:
value = feat[key]
np.save(os.path.join(feat_path, key + ".npy"), value)
_dump_feat(os.path.join(path, "src_node_feat"), self.src_node_feat)
_dump_feat(os.path.join(path, "dst_node_feat"), self.dst_node_feat)
_dump_feat(os.path.join(path, "edge_feat"), self.edge_feat)
@property
def adj_src_index(self):
"""Return an EdgeIndex object for src.
"""
if self._adj_src_index is None:
u = self._edges[:, 0]
v = self._edges[:, 1]
self._adj_src_index = EdgeIndex.from_edges(
u=u, v=v, num_nodes=self._src_num_nodes)
return self._adj_src_index
@property
def adj_dst_index(self):
"""Return an EdgeIndex object for dst.
"""
if self._adj_dst_index is None:
v = self._edges[:, 0]
u = self._edges[:, 1]
self._adj_dst_index = EdgeIndex.from_edges(
u=u, v=v, num_nodes=self._dst_num_nodes)
return self._adj_dst_index
@property
def edge_feat(self):
"""Return a dictionary of edge features.
"""
return self._edge_feat
@property
def src_node_feat(self):
"""Return a dictionary of src node features.
"""
return self._src_node_feat
@property
def dst_node_feat(self):
"""Return a dictionary of dst node features.
"""
return self._dst_node_feat
@property
def num_edges(self):
"""Return the number of edges.
"""
if self._is_tensor:
return paddle.shape(self._edges)[0]
else:
return self._edges.shape[0]
@property
def src_num_nodes(self):
"""Return the number of src nodes.
"""
return self._src_num_nodes
@property
def dst_num_nodes(self):
"""Return the number of dst nodes.
"""
return self._dst_num_nodes
@property
def edges(self):
"""Return all edges in numpy.ndarray or paddle.Tensor with shape (num_edges, 2).
"""
return self._edges
def sorted_edges(self, sort_by="src"):
"""Return sorted edges with different strategies.
If :code:`sort_by="src"`, then edges will be sorted by :code:`src`
nodes and otherwise :code:`dst`.
Args:
sort_by: The type for sorted edges. ("src" or "dst")
Return:
A tuple of (sorted_src, sorted_dst, sorted_eid).
"""
if sort_by not in ["src", "dst"]:
raise ValueError("sort_by should be in 'src' or 'dst'.")
if sort_by == 'src':
src, dst, eid = self.adj_src_index.triples()
else:
dst, src, eid = self.adj_dst_index.triples()
return src, dst, eid
@property
def src_nodes(self):
"""Return all src nodes id from 0 to :code:`src_num_nodes - 1`
"""
if self._src_nodes is None:
if self.is_tensor():
self._src_nodes = paddle.arange(self.src_num_nodes)
else:
                self._src_nodes = np.arange(self.src_num_nodes)
        return self._src_nodes
#!/usr/bin/env python
# Copyright 2021
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import ttest_ind
import netCDF4 as nc
import pickle
import os
from PIL import Image as PIL_Image
import sys
import shutil
import glob
import datetime
import time
import calendar
from numpy import genfromtxt
from scipy.optimize import curve_fit
from scipy.cluster.vq import kmeans,vq
from scipy.interpolate import interpn, interp1d
from math import e as e_constant
import math
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import warnings
warnings.filterwarnings("ignore")
plt.style.use('classic')
# font size
# font_size = 14
# matplotlib.rc('font', **{'family': 'serif', 'serif': ['Arial'], 'size': font_size})
# matplotlib.rc('font', weight='bold')
p_progress_writing = False
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
time_format = '%d-%m-%Y_%H:%M'
time_format_khan = '%Y%m%d.0%H'
time_format_mod = '%Y-%m-%d_%H:%M:%S'
time_format_twolines = '%H:%M\n%d-%m-%Y'
time_format_twolines_noYear_noMin_intMonth = '%H\n%d-%m'
time_format_twolines_noYear = '%H:%M\n%d-%b'
time_format_twolines_noYear_noMin = '%H\n%d-%b'
time_format_date = '%Y-%m-%d'
time_format_time = '%H:%M:%S'
time_format_parsivel = '%Y%m%d%H%M'
time_format_parsivel_seconds = '%Y%m%d%H%M%S'
time_str_formats = [
time_format,
time_format_mod,
time_format_twolines,
time_format_twolines_noYear,
time_format_date,
time_format_time,
time_format_parsivel
]
default_cm = cm.jet
cm_vir = cm.viridis
listed_cm_colors_list = ['silver', 'red', 'green', 'yellow', 'blue', 'black']
listed_cm = ListedColormap(listed_cm_colors_list, 'indexed')
colorbar_tick_labels_list_cloud_phase = ['Clear', 'Water', 'SLW', 'Mixed', 'Ice', 'Unknown']
listed_cm_colors_list_cloud_phase = ['white', 'red', 'green', 'yellow', 'blue', 'purple']
listed_cm_cloud_phase = ListedColormap(listed_cm_colors_list_cloud_phase, 'indexed')
avogadros_ = 6.022140857E+23 # molecules/mol
gas_const = 83144.598 # cm3 mbar k-1 mol-1
gas_const_2 = 8.3144621 # J mol-1 K-1
gas_const_water = 461 # J kg-1 K-1
gas_const_dry = 287 # J kg-1 K-1
boltzmann_ = gas_const / avogadros_ # cm3 mbar / k molecules
gravity_ = 9.80665             # m/s^2
poisson_ = 2/7 # for dry air (k)
latent_heat_v = 2.501E+6 # J/kg
latent_heat_f = 3.337E+5 # J/kg
latent_heat_s = 2.834E+6 # J/kg
heat_capacity__Cp = 1005.7 # J kg-1 K-1 dry air
heat_capacity__Cv = 719 # J kg-1 K-1 water vapor
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
Rs_v = 461.51 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Cp_da = 1004.6 # Specific heat at constant pressure for dry air
Cv_da = 719. # Specific heat at constant volume for dry air
Cp_v = 1870. # Specific heat at constant pressure for water vapour
Cv_v = 1410. # Specific heat at constant volume for water vapour
Cp_lw = 4218 # Specific heat at constant pressure for liquid water
Epsilon = 0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK = 273.15 # Temperature offset between K and C (deg C)
rho_w = 1000. # Liquid Water density kg m^{-3}
grav = 9.80665 # Gravity, m s^{-2}
Lv = 2.5e6 # Latent Heat of vaporisation
boltzmann = 5.67e-8 # Stefan-Boltzmann constant
mv = 18.0153e-3 # Mean molar mass of water vapor(kg/mol)
m_a = 28.9644e-3 # Mean molar mass of air(kg/mol)
Rstar_a = 8.31432 # Universal gas constant for air (N m /(mol K))
path_output = '/g/data/k10/la6753/'
# Misc
class Object_create(object):
pass
def list_files_recursive(path_, filter_str=None):
# create list of raw spectra files
file_list = []
# r=root, d=directories, f = files
if filter_str is None:
for r, d, f in os.walk(path_):
for file in f:
file_list.append(os.path.join(r, file))
else:
for r, d, f in os.walk(path_):
for file in f:
if filter_str in file:
file_list.append(os.path.join(r, file))
return file_list
def list_files(path_, filter_str='*'):
file_list = sorted(glob.glob(str(path_ + filter_str)))
return file_list
def coincidence(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
check_[check_ == check_] = 1
arr_1_checked = arr_1 * check_
arr_2_checked = arr_2 * check_
return arr_1_checked[~np.isnan(arr_1_checked)], arr_2_checked[~np.isnan(arr_2_checked)]
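# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# coincidence() keeps only the rows where BOTH series hold valid (non-NaN) values.
def _example_coincidence():
    a = np.array([1.0, np.nan, 3.0, 4.0])
    b = np.array([10.0, 20.0, np.nan, 40.0])
    a_c, b_c = coincidence(a, b)
    print(a_c)  # [1. 4.]
    print(b_c)  # [10. 40.]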
def array_2d_fill_gaps_by_interpolation_linear(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
output_array_X[r_, :][np.isnan(array_[r_, :])] = np.interp(
col_index[np.isnan(array_[r_, :])], coin_out[0], coin_out[1])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
output_array_Y[:, c_][np.isnan(array_[:, c_])] = np.interp(
row_index[np.isnan(array_[:, c_])], coin_out[0], coin_out[1])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
def array_2d_fill_gaps_by_interpolation_cubic(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_X[r_, :][np.isnan(array_[r_, :])] = interp_function(col_index[np.isnan(array_[r_, :])])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_Y[:, c_][np.isnan(array_[:, c_])] = interp_function(row_index[np.isnan(array_[:, c_])])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
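# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# Fills a single interior NaN by averaging the row-wise and column-wise linear
# interpolations; valid cells are left unchanged.
def _example_fill_gaps():
    arr = np.arange(25, dtype=float).reshape(5, 5)
    arr[2, 2] = np.nan
    filled = array_2d_fill_gaps_by_interpolation_linear(arr)
    print(filled[2, 2])  # 12.0 (mean of row- and column-interpolated values)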
def combine_2_time_series(time_1_reference, data_1, time_2, data_2,
forced_time_step=None, forced_start_time=None, forced_stop_time=None,
cumulative_var_1=False, cumulative_var_2=False):
"""
    takes two data sets with respective time series, and outputs the coincident stamps from both data sets.
    It does this by using mean_discrete() for both sets with the same start stamp and averaging time; the
    averaging time is the forced_time_step
    :param time_1_reference: 1D array, same units as time_2, this series will define the returned time step reference
    :param data_1: can be 1D or 2D array, first dimension must be the same as time_1_reference
    :param time_2: 1D array, same units as time_1_reference
    :param data_2: can be 1D or 2D array, first dimension must be the same as time_2
    :param forced_time_step: averaging time step; if None, the median time step of time_1_reference is used
    :param forced_start_time: if not None, the returned series will start at this time stamp
    :param forced_stop_time: if not None, the returned series will stop at this time stamp
    :param cumulative_var_1: True if you want the variable to be accumulated instead of averaged; only for 1D data
    :param cumulative_var_2: True if you want the variable to be accumulated instead of averaged; only for 1D data
    :return: Index_averaged_1: 1D array, smallest coincident time, without time stamp gaps
    :return: Values_mean_1: same shape as data_1, aligned to Index_averaged_1 times
    :return: Values_mean_2: same shape as data_2, aligned to Index_averaged_1 times
    """
# define forced_time_step
if forced_time_step is None:
forced_time_step = np.median(np.diff(time_1_reference))
# find time period
if forced_start_time is None:
first_time_stamp = max(np.nanmin(time_1_reference), np.nanmin(time_2))
else:
first_time_stamp = forced_start_time
if forced_stop_time is None:
last_time_stamp = min(np.nanmax(time_1_reference), np.nanmax(time_2))
else:
last_time_stamp = forced_stop_time
# do the averaging
print('starting averaging of data 1')
if cumulative_var_1:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
print('starting averaging of data 2')
if cumulative_var_2:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
# check that averaged indexes are the same
if np.nansum(np.abs(Index_averaged_1 - Index_averaged_2)) != 0:
        print('error during averaging of series, times do not match')
return None, None, None
# return the combined, trimmed data
return Index_averaged_1, Values_mean_1, Values_mean_2
def split_str_chunks(s, n):
"""Produce `n`-character chunks from `s`."""
out_list = []
for start in range(0, len(s), n):
out_list.append(s[start:start+n])
return out_list
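# --- Editorial usage sketch (added by the editor, not part of the original module) ---
def _example_split_str_chunks():
    print(split_str_chunks('abcdefg', 3))  # ['abc', 'def', 'g']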
def coincidence_multi(array_list):
# only coincidences
parameters_list = array_list
check_ = parameters_list[0]
for param_ in parameters_list[1:]:
check_ = check_ * param_
check_[check_ == check_] = 1
# delete empty rows_
list_list = []
for param_ in parameters_list:
t_list = []
for i in range(check_.shape[0]):
if check_[i] == check_[i]:
t_list.append(param_[i])
list_list.append(t_list)
# concatenate
ar_list = []
for ii in range(len(parameters_list)):
ar_list.append(np.array(list_list[ii]))
return ar_list
def coincidence_zero(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
# delete empty rows_
list_1 = []
list_2 = []
for i in range(check_.shape[0]):
if check_[i] != 0:
list_1.append(arr_1[i])
list_2.append(arr_2[i])
return np.array(list_1),np.array(list_2)
def discriminate(X_, Y_, Z_, value_disc_list, discrmnt_invert_bin = False):
if discrmnt_invert_bin:
Z_mask = np.ones(Z_.shape[0])
Z_mask[Z_ > value_disc_list[0]] = np.nan
Z_mask[Z_ >= value_disc_list[1]] = 1
Y_new = Y_ * Z_mask
X_new = X_ * Z_mask
else:
Z_mask = np.ones(Z_.shape[0])
Z_mask[Z_ < value_disc_list[0]] = np.nan
Z_mask[Z_ > value_disc_list[1]] = np.nan
Y_new = Y_ * Z_mask
X_new = X_ * Z_mask
return X_new, Y_new
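# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# discriminate() masks X and Y wherever Z falls outside [v_min, v_max].
def _example_discriminate():
    x = np.arange(5, dtype=float)
    y = x * 10
    z = np.array([0.0, 5.0, 10.0, 15.0, 20.0])
    x_new, y_new = discriminate(x, y, z, [5, 15])
    print(x_new)  # [nan  1.  2.  3. nan]
    print(y_new)  # [nan 10. 20. 30. nan]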
def add_ratio_to_values(header_, values_, nominator_index, denominator_index, ratio_name, normalization_value=1.):
nominator_data = values_[:,nominator_index]
denominator_data = values_[:,denominator_index]
ratio_ = normalization_value * nominator_data / denominator_data
values_new = np.column_stack((values_,ratio_))
header_new = np.append(header_,ratio_name)
return header_new, values_new
def bin_data(x_val_org,y_val_org, start_bin_edge=0, bin_size=1, min_bin_population=1):
# get coincidences only
x_val,y_val = coincidence(x_val_org,y_val_org)
# combine x and y in matrix
M_ = np.column_stack((x_val,y_val))
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(x_val.shape[0]-1):
if x_val[x]==x_val[x] and x_val[x+1]==x_val[x+1]:
if x_val[x+1] < x_val[x]:
always_ascending = 0
if always_ascending == 0:
M_sorted = M_[M_[:,0].argsort()] # sort by first column
M_ = M_sorted
# convert data to list of bins
y_binned = []
x_binned = []
last_row = 0
last_row_temp = last_row
while start_bin_edge <= np.nanmax(x_val):
y_val_list = []
for row_ in range(last_row, M_.shape[0]):
if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
if M_[row_, 1] == M_[row_, 1]:
y_val_list.append(M_[row_, 1])
last_row_temp = row_
if M_[row_, 0] >= start_bin_edge + bin_size:
last_row_temp = row_
break
x_binned.append(start_bin_edge)
if len(y_val_list) >= min_bin_population:
y_binned.append(y_val_list)
else:
y_binned.append([])
start_bin_edge += bin_size
last_row = last_row_temp
# add series
if bin_size >= 1:
x_binned_int = np.array(x_binned, dtype=int)
else:
x_binned_int = x_binned
return x_binned_int, y_binned
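# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# bin_data() returns the left bin edges and, per bin, the list of y values whose
# x falls inside that bin.
def _example_bin_data():
    x = np.array([0.5, 1.2, 1.8, 2.5, 2.6])
    y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    x_bins, y_bins = bin_data(x, y, start_bin_edge=0, bin_size=1)
    print(x_bins)  # [0 1 2]
    print(y_bins)  # [[1.0], [2.0, 3.0], [4.0, 5.0]]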
def shiftedColorMap(cmap, midpoint=0.5, name='shiftedcmap'):
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(0, 1, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap
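# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# Builds a jet colormap whose center is moved to 0.3 of the color range; useful
# when data are asymmetric around zero. Assumes a matplotlib version that still
# provides plt.register_cmap (called inside shiftedColorMap).
def _example_shifted_cmap():
    cmap = shiftedColorMap(cm.jet, midpoint=0.3, name='jet_shifted_03')
    print(cmap(0.3))  # RGBA tuple at the shifted midpoint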
def student_t_test(arr_1, arr_2):
return ttest_ind(arr_1, arr_2, nan_policy='omit')
def k_means_clusters(array_, cluster_number, forced_centers=None):
if forced_centers is None:
centers_, x = kmeans(array_,cluster_number)
data_id, x = vq(array_, centers_)
return centers_, data_id
else:
data_id, x = vq(array_, forced_centers)
return forced_centers, data_id
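# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# Two well-separated Gaussian blobs should be split into two clusters.
def _example_k_means_clusters():
    np.random.seed(0)
    pts = np.vstack([np.random.normal(0, 0.5, (20, 2)),
                     np.random.normal(5, 0.5, (20, 2))])
    centers, labels = k_means_clusters(pts, 2)
    print(centers)  # two centers, near (0, 0) and (5, 5)
    print(labels)   # cluster id per point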
def grid_(x, y, z, resX=100, resY=100):
"Convert 3 column data to matplotlib grid"
xi = np.linspace(min(x), max(x), resX)
yi = np.linspace(min(y), max(y), resY)
    # NOTE: matplotlib.mlab.griddata was removed in matplotlib 3.0; this call
    # requires an older matplotlib (scipy.interpolate.griddata is the modern alternative)
    Z = matplotlib.mlab.griddata(x, y, z, xi, yi)
X, Y = np.meshgrid(xi, yi)
return X, Y, Z
def find_max_index_2d_array(array_):
return np.unravel_index(np.argmax(array_, axis=None), array_.shape)
def find_min_index_2d_array(array_):
return np.unravel_index(np.argmin(array_, axis=None), array_.shape)
def find_max_index_1d_array(array_):
return np.argmax(array_, axis=None)
def find_min_index_1d_array(array_):
return np.argmin(array_, axis=None)
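# --- Editorial usage sketch (added by the editor, not part of the original module) ---
def _example_find_indexes():
    m = np.array([[1, 9], [3, 2]])
    print(find_max_index_2d_array(m))                    # (0, 1)
    print(find_min_index_1d_array(np.array([4, 1, 7])))  # 1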
def time_series_interpolate_discrete(Index_, Values_, index_step, first_index,
position_=0., last_index=None):
"""
    this will linearly interpolate Values_ onto a regular index grid starting at first_index
    :param Index_: n by 1 numpy array with the original (possibly irregular) index
    :param Values_: n by m numpy array, values to be interpolated (np.interp supports 1D values only)
    :param index_step: in same units as Index_
    :param first_index: is the first discrete index on the new arrays
    :param position_: will determine where the stamp is located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
    :param last_index: in case you want to force the returned series to some fixed period/length
    :return: Index_interp, Values_interp
    """
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
print('error during shape check! Index_.shape[0] != Values_.shape[0]')
return None, None
if Index_[-1] < first_index:
print('error during shape check! Index_[-1] < first_index')
return None, None
# initialize output matrices
if last_index is None:
final_index = np.nanmax(Index_)
else:
final_index = last_index
total_averaged_rows = int((final_index-first_index) / index_step) + 1
if len(Values_.shape) == 1:
Values_mean = np.zeros(total_averaged_rows)
Values_mean[:] = np.nan
else:
Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
Values_mean[:,:] = np.nan
Index_interp = np.zeros(total_averaged_rows)
for r_ in range(total_averaged_rows):
Index_interp[r_] = first_index + (r_ * index_step)
Index_interp -= (position_ * index_step)
Values_interp = np.interp(Index_interp, Index_, Values_)
Index_interp = Index_interp + (position_ * index_step)
return Index_interp, Values_interp
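# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# Resamples an irregular 1D series onto a regular 1-unit grid by linear
# interpolation.
def _example_ts_interpolate():
    t = np.array([0.0, 2.0, 4.0, 6.0])
    v = np.array([0.0, 20.0, 40.0, 60.0])
    ti, vi = time_series_interpolate_discrete(t, v, index_step=1, first_index=0)
    print(ti)  # [0. 1. 2. 3. 4. 5. 6.]
    print(vi)  # [ 0. 10. 20. 30. 40. 50. 60.]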
def array_2D_sort_ascending_by_column(array_, column_=0):
array_sorted = array_[array_[:, column_].argsort()]
return array_sorted
def get_ax_range(ax):
x_1 = ax.axis()[0]
x_2 = ax.axis()[1]
y_1 = ax.axis()[2]
y_2 = ax.axis()[3]
return x_1, x_2, y_1, y_2
def get_array_perimeter_only(array_):
return np.concatenate([array_[0, :-1], array_[:-1, -1], array_[-1, ::-1], array_[-2:0:-1, 0]])
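# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# Walks the border of a 2D array clockwise from the top-left corner.
def _example_perimeter():
    m = np.arange(9).reshape(3, 3)
    print(get_array_perimeter_only(m))  # [0 1 2 5 8 7 6 3]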
# WRF
def wrf_var_search(wrf_nc_file, description_str):
description_str_lower = description_str.lower()
for var_ in sorted(wrf_nc_file.variables):
try:
if description_str_lower in wrf_nc_file.variables[var_].description.lower():
print(var_, '|', wrf_nc_file.variables[var_].description)
except:
pass
def create_virtual_sonde_from_wrf(sonde_dict, filelist_wrf_output,
wrf_filename_time_format = 'wrfout_d03_%Y-%m-%d_%H_%M_%S'):
# create time array
filelist_wrf_output_noPath = []
for filename_ in filelist_wrf_output:
filelist_wrf_output_noPath.append(filename_.split('/')[-1])
wrf_time_file_list = np.array(time_str_to_seconds(filelist_wrf_output_noPath, wrf_filename_time_format))
# create lat and lon arrays
wrf_domain_file = nc.Dataset(filelist_wrf_output[0])
# p(sorted(wrf_domain_file.variables))
# wrf_vars = sorted(wrf_domain_file.variables)
# for i_ in range(len(wrf_vars)):
# try:
# print(wrf_vars[i_], '\t\t', wrf_domain_file.variables[wrf_vars[i_]].description)
# except:
# print(wrf_vars[i_])
wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
wrf_lat_U = wrf_domain_file.variables['XLAT_U'][0, :, :].filled(np.nan)
wrf_lon_U = wrf_domain_file.variables['XLONG_U'][0, :, :].filled(np.nan)
wrf_lat_V = wrf_domain_file.variables['XLAT_V'][0, :, :].filled(np.nan)
wrf_lon_V = wrf_domain_file.variables['XLONG_V'][0, :, :].filled(np.nan)
wrf_domain_file.close()
# load sonde's profile
sonde_hght = sonde_dict['hght'] # m ASL
sonde_pres = sonde_dict['pres'] # hPa
sonde_time = sonde_dict['time'] # seconds since epoc
sonde_lati = sonde_dict['lati'] # degrees
sonde_long = sonde_dict['long'] # degrees
# create output lists of virtual sonde
list_p__ = []
list_hgh = []
list_th_ = []
list_th0 = []
list_qv_ = []
list_U__ = []
list_V__ = []
list_tim = []
list_lat = []
list_lon = []
wrf_point_abs_address_old = 0
# loop thru real sonde's points
for t_ in range(sonde_hght.shape[0]):
p_progress_bar(t_, sonde_hght.shape[0])
point_hght = sonde_hght[t_]
point_pres = sonde_pres[t_]
point_time = sonde_time[t_]
point_lati = sonde_lati[t_]
point_long = sonde_long[t_]
# find closest cell via lat, lon
index_tuple = find_index_from_lat_lon_2D_arrays(wrf_lat,wrf_lon, point_lati,point_long)
index_tuple_U = find_index_from_lat_lon_2D_arrays(wrf_lat_U,wrf_lon_U, point_lati,point_long)
index_tuple_V = find_index_from_lat_lon_2D_arrays(wrf_lat_V,wrf_lon_V, point_lati,point_long)
# find closest file via time
file_index = time_to_row_sec(wrf_time_file_list, point_time)
# open wrf file
wrf_domain_file = nc.Dataset(filelist_wrf_output[file_index])
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, index_tuple[0], index_tuple[1]].data +
wrf_domain_file.variables['P'][0, :, index_tuple[0], index_tuple[1]].data) / 100 # hPa
# find closest model layer via pressure
layer_index = find_min_index_1d_array(np.abs(wrf_press - point_pres))
# define point absolute address and check if it is a new point
wrf_point_abs_address_new = (index_tuple[0], index_tuple[1], file_index, layer_index)
if wrf_point_abs_address_new != wrf_point_abs_address_old:
wrf_point_abs_address_old = wrf_point_abs_address_new
# get wrf data
index_tuple_full = (0, layer_index, index_tuple[0], index_tuple[1])
index_tuple_full_U = (0, layer_index, index_tuple_U[0], index_tuple_U[1])
index_tuple_full_V = (0, layer_index, index_tuple_V[0], index_tuple_V[1])
# save to arrays
list_p__.append(float(wrf_press[layer_index]))
list_hgh.append(float(point_hght))
list_th_.append(float(wrf_domain_file.variables['T'][index_tuple_full]))
list_th0.append(float(wrf_domain_file.variables['T00'][0]))
list_qv_.append(float(wrf_domain_file.variables['QVAPOR'][index_tuple_full]))
list_U__.append(float(wrf_domain_file.variables['U'][index_tuple_full_U]))
list_V__.append(float(wrf_domain_file.variables['V'][index_tuple_full_V]))
list_tim.append(float(wrf_time_file_list[file_index]))
list_lat.append(float(wrf_lat[index_tuple[0], index_tuple[1]]))
list_lon.append(float(wrf_lon[index_tuple[0], index_tuple[1]]))
wrf_domain_file.close()
# convert lists to arrays
array_p__ = np.array(list_p__)
array_hgh = np.array(list_hgh)
array_th_ = np.array(list_th_)
array_th0 = np.array(list_th0)
array_qv_ = np.array(list_qv_)
array_U__ = np.array(list_U__)
array_V__ = np.array(list_V__)
array_tim = np.array(list_tim)
array_lat = np.array(list_lat)
array_lon = np.array(list_lon)
# calculate derivative variables
wrf_temp_K = calculate_temperature_from_potential_temperature(array_th_ + array_th0, array_p__)
wrf_temp_C = kelvin_to_celsius(wrf_temp_K)
wrf_e = MixR2VaporPress(array_qv_, array_p__*100)
wrf_td_C = DewPoint(wrf_e)
wrf_td_C[wrf_td_C > wrf_temp_C] = wrf_temp_C[wrf_td_C > wrf_temp_C]
wrf_RH = calculate_RH_from_QV_T_P(array_qv_, wrf_temp_K, array_p__*100)
wrf_WD, wrf_WS = cart_to_polar(array_V__, array_U__)
wrf_WD_met = wrf_WD + 180
wrf_WD_met[wrf_WD_met >= 360] = wrf_WD_met[wrf_WD_met >= 360] - 360
wrf_WS_knots = ws_ms_to_knots(wrf_WS)
# create virtual sonde dict
wrf_sonde_dict = {}
wrf_sonde_dict['hght'] = array_hgh
wrf_sonde_dict['pres'] = array_p__
wrf_sonde_dict['temp'] = wrf_temp_C
wrf_sonde_dict['dwpt'] = wrf_td_C
wrf_sonde_dict['sknt'] = wrf_WS_knots
wrf_sonde_dict['drct'] = wrf_WD_met
wrf_sonde_dict['relh'] = wrf_RH
wrf_sonde_dict['time'] = array_tim
wrf_sonde_dict['lati'] = array_lat
wrf_sonde_dict['long'] = array_lon
return wrf_sonde_dict
def wrf_get_temp_K(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
wrf_domain_file.variables['P'][0, :, :, :].data) / 100 # hPa
wrf_theta = (wrf_domain_file.variables['T'][0, :, :, :].data +
wrf_domain_file.variables['T00'][0].data) # K
wrf_temp_K = calculate_temperature_from_potential_temperature(wrf_theta, wrf_press)
if original_arg_type_str:
wrf_domain_file.close()
return wrf_temp_K
def wrf_get_press_hPa(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
wrf_domain_file.variables['P'][0, :, :, :].data) / 100 # hPa
if original_arg_type_str:
wrf_domain_file.close()
return wrf_press
def wrf_get_height_m(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_height = (wrf_domain_file.variables['PH'][0,:-1,:,:].data +
wrf_domain_file.variables['PHB'][0,:-1,:,:].data) / gravity_
if original_arg_type_str:
wrf_domain_file.close()
return wrf_height
def wrf_get_terrain_height_m(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_height = (wrf_domain_file.variables['PH'][0,0,:,:].data +
wrf_domain_file.variables['PHB'][0,0,:,:].data) / gravity_
if original_arg_type_str:
wrf_domain_file.close()
return wrf_height
def wrf_get_water_vapor_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QVAPOR = wrf_domain_file.variables['QVAPOR'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QVAPOR
def wrf_get_cloud_water_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QCLOUD = wrf_domain_file.variables['QCLOUD'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QCLOUD
def wrf_get_ice_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QICE = wrf_domain_file.variables['QICE'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QICE
def wrf_get_lat_lon(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
if original_arg_type_str:
wrf_domain_file.close()
return wrf_lat, wrf_lon
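# --- Editorial usage sketch (added by the editor, not part of the original module) ---
# The wrf_get_* helpers accept either an open nc.Dataset or a path string (they
# open and close the file themselves when given a string).
# 'wrfout_d03_2020-01-01_00_00_00' is a placeholder filename, not a file
# referenced by the original source.
def _example_wrf_getters(wrf_path='wrfout_d03_2020-01-01_00_00_00'):
    temp_k = wrf_get_temp_K(wrf_path)       # shape: (bottom_top, south_north, west_east)
    press_hpa = wrf_get_press_hPa(wrf_path)
    lat, lon = wrf_get_lat_lon(wrf_path)
    print(temp_k.shape, press_hpa.shape, lat.shape, lon.shape)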
def wrf_rename_files_fix_time_format(filename_original_list, original_character=':', replacement_character='_'):
for i_, filename_ in enumerate(filename_original_list):
p_progress_bar(i_, len(filename_original_list))
new_filename = filename_.replace(original_character,replacement_character)
os.rename(filename_, new_filename)
# meteorology
def calculate_saturation_vapor_pressure_wexler(T_array_K):
# result in mb (hPa)
G0 = -0.29912729E+4
G1 = -0.60170128E+4
G2 = 0.1887643854E+2
G3 = -0.28354721E-1
G4 = 0.17838301E-4
G5 = -0.84150417E-9
G6 = 0.44412543E-12
G7 = 0.2858487E+1
e_s = np.exp((G0 * (T_array_K ** -2)) +
(G1 * (T_array_K ** -1)) +
G2 +
(G3 * T_array_K) +
(G4 * (T_array_K ** 2)) +
(G5 * (T_array_K ** 3)) +
(G6 * (T_array_K ** 4)) +
(G7 * np.log(T_array_K)))
return e_s * 0.01
def calculate_saturation_mixing_ratio(P_array_mb, T_array_K):
e_s = calculate_saturation_vapor_pressure_wexler(T_array_K)
q_s = 621.97 * (e_s / (P_array_mb - e_s))
return q_s
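# --- Editorial sanity check (added by the editor, not part of the original module) ---
# At 20 C (293.15 K) the Wexler saturation vapor pressure is ~23.4 hPa, and the
# saturation mixing ratio at 1000 hPa is ~14.9 g/kg.
def _example_saturation():
    t_k = np.array([293.15])
    print(calculate_saturation_vapor_pressure_wexler(t_k))             # ~[23.4] hPa
    print(calculate_saturation_mixing_ratio(np.array([1000.0]), t_k))  # ~[14.9] g/kg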
def calculate_potential_temperature(T_array_K, P_array_hPa):
potential_temp = T_array_K * ((1000 / P_array_hPa) ** poisson_)
return potential_temp
def calculate_equivalent_potential_temperature(T_array_K, P_array_hPa, R_array_kg_over_kg):
P_o = 1000
T_e = T_array_K + (latent_heat_v * R_array_kg_over_kg / heat_capacity__Cp)
theta_e = T_e * ((P_o/P_array_hPa)**poisson_)
return theta_e
def calculate_temperature_from_potential_temperature(theta_array_K, P_array_hPa):
temperature_ = theta_array_K * ( (P_array_hPa/1000) ** poisson_ )
return temperature_
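# --- Editorial sanity check (added by the editor, not part of the original module) ---
# Potential temperature of 273.15 K air at 850 hPa is ~286.1 K; converting back
# recovers the original temperature.
def _example_potential_temperature():
    theta = calculate_potential_temperature(273.15, 850.0)
    t_back = calculate_temperature_from_potential_temperature(theta, 850.0)
    print(theta, t_back)  # ~286.1, 273.15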
def calculate_mountain_height_from_sonde(sonde_dict):
"""
calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array
"""
# Set initial conditions
height = 1000 # metres
# define arrays
WS_array = ws_knots_to_ms(sonde_dict['SKNT'])
U_array, V_array = polar_to_cart(sonde_dict['DRCT'], WS_array)
T_array = celsius_to_kelvin(sonde_dict['TEMP'])
RH_array = sonde_dict['RELH']
P_array = sonde_dict['PRES']
Z_array = sonde_dict['HGHT']
Q_array = sonde_dict['MIXR']/1000
TH_array = sonde_dict['THTA']
# calculated arrays
q_s = calculate_saturation_mixing_ratio(P_array, T_array)
e_ = gas_const_dry / gas_const_water
# gradients
d_ln_TH = np.gradient(np.log(TH_array))
d_z = np.gradient(Z_array)
d_q_s = np.gradient(q_s)
# Dry Brunt - Vaisala
N_dry = gravity_ * d_ln_TH / d_z
N_dry[RH_array >= 90] = 0
# Moist Brunt - Vaisala
term_1_1 = 1 + ( latent_heat_v * q_s / (gas_const_dry * T_array) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * q_s / (heat_capacity__Cp * gas_const_dry * (T_array**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * T_array)
term_2_3 = d_q_s / d_z
term_3 = d_q_s / d_z # should be d_q_w but sonde data has no cloud water data
N_moist = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
N_moist[RH_array < 90] = 0
# define output array
N_2 = (N_dry + N_moist)**2
H_hat_2 = N_2 * (height**2) / (U_array**2)
return H_hat_2
def calculate_mountain_height_from_era5(era5_pressures_filename, era5_surface_filename, point_lat, point_lon,
return_arrays=False, u_wind_mode='u', range_line_degrees=None,
time_start_str_YYYYmmDDHHMM='',time_stop_str_YYYYmmDDHHMM='',
reference_height=1000, return_debug_arrays=False):
"""
calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array
    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line_degrees must not be None
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degrees (decimals) from north, clockwise, of the mountain range line.
"""
# load files
era5_sur = nc.Dataset(era5_surface_filename, 'r')
era5_pre = nc.Dataset(era5_pressures_filename, 'r')
# check if times are the same for both files
dif_sum = np.sum(np.abs(era5_pre.variables['time'][:] - era5_sur.variables['time'][:]))
if dif_sum > 0:
print('Error, times in selected files are not the same')
return
# check if lat lon are the same for both files
dif_sum = np.sum(np.abs(era5_pre.variables['latitude'][:] - era5_sur.variables['latitude'][:]))
dif_sum = dif_sum + np.sum(np.abs(era5_pre.variables['longitude'][:] - era5_sur.variables['longitude'][:]))
if dif_sum > 0:
print('Error, latitude or longitude in selected files are not the same')
return
# find lat lon index
lat_index, lon_index = find_index_from_lat_lon(era5_sur.variables['latitude'][:],
era5_sur.variables['longitude'][:], [point_lat], [point_lon])
lat_index = lat_index[0]
lon_index = lon_index[0]
# copy arrays
time_array = time_era5_to_seconds(np.array(era5_sur.variables['time'][:]))
r_1 = 0
r_2 = -1
if time_start_str_YYYYmmDDHHMM != '':
r_1 = time_to_row_str(time_array, time_start_str_YYYYmmDDHHMM)
if time_stop_str_YYYYmmDDHHMM != '':
r_2 = time_to_row_str(time_array, time_stop_str_YYYYmmDDHHMM)
time_array = time_array[r_1:r_2]
sp_array = np.array(era5_sur.variables['sp'][r_1:r_2, lat_index, lon_index]) / 100 # hPa
P_array = np.array(era5_pre.variables['level'][:]) # hPa
if range_line_degrees is not None:
WD_, WS_ = cart_to_polar(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]).flatten(),
np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]).flatten())
WD_delta = WD_ - range_line_degrees
range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
U_array = range_normal_component.reshape((sp_array.shape[0], P_array.shape[0]))
else:
if u_wind_mode == 'u':
U_array = np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index])
else:
U_array = np.sqrt(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]) ** 2 +
np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]) ** 2)
T_array = np.array(era5_pre.variables['t'][r_1:r_2, :, lat_index, lon_index])
Q_L_array = np.array(era5_pre.variables['crwc'][r_1:r_2, :, lat_index, lon_index])
RH_array = np.array(era5_pre.variables['r'][r_1:r_2, :, lat_index, lon_index])
Z_array = np.array(era5_pre.variables['z'][r_1:r_2, :, lat_index, lon_index]) / gravity_
# calculate arrays
TH_array = np.zeros((time_array.shape[0], P_array.shape[0]), dtype=float)
for t_ in range(time_array.shape[0]):
TH_array[t_,:] = calculate_potential_temperature(T_array[t_,:], P_array[:])
# calculated arrays
q_s = calculate_saturation_mixing_ratio(P_array, T_array)
e_ = gas_const_dry / gas_const_water
# create output dict
H_hat_2 = {}
# loop tru time stamps
for t_ in range(time_array.shape[0]):
p_progress_bar(t_,time_array.shape[0])
# find surface pressure at this time stamp
surface_p = sp_array[t_]
# find pressure at 1000 meters
pressure_1000m = np.interp(reference_height, Z_array[t_, :], P_array)
pressure_1000m_index = np.argmin(np.abs(P_array - pressure_1000m))
# find extrapolations
ql_0 = np.interp(np.log(surface_p), np.log(P_array), Q_L_array[t_, :])
z__0 = np.interp(np.log(surface_p), np.log(P_array), Z_array[t_, :])
th_0 = np.interp(np.log(surface_p), np.log(P_array), TH_array[t_, :])
qs_0 = np.interp(np.log(surface_p), np.log(P_array), q_s[t_, :])
t__1000 = np.interp(reference_height, Z_array[t_, :], T_array[t_, :])
u__1000 = np.interp(reference_height, Z_array[t_, :], U_array[t_, :])
ql_1000 = np.interp(reference_height, Z_array[t_, :], Q_L_array[t_, :])
z__1000 = reference_height
th_1000 = np.interp(reference_height, Z_array[t_, :], TH_array[t_, :])
qs_1000 = np.interp(reference_height, Z_array[t_, :], q_s[t_, :])
# gradients
d_ln_TH = np.log(th_1000) - np.log(th_0)
d_z = z__1000 - z__0
d_q_s = qs_1000 - qs_0
d_q_w = (d_q_s) + (ql_1000 - ql_0)
# Brunt - Vaisala
if np.max(RH_array[t_, pressure_1000m_index:])>= 90:
# Moist
term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
(heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
term_2_3 = d_q_s / d_z
term_3 = d_q_w / d_z
N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
else:
# Dry
N_2 = gravity_ * d_ln_TH / d_z
# populate each time stamp
H_hat_2[time_array[t_]] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
era5_sur.close()
era5_pre.close()
if return_arrays:
H_hat_2_time = sorted(H_hat_2.keys())
H_hat_2_time = np.array(H_hat_2_time)
H_hat_2_vals = np.zeros(H_hat_2_time.shape[0], dtype=float)
for r_ in range(H_hat_2_time.shape[0]):
H_hat_2_vals[r_] = H_hat_2[H_hat_2_time[r_]]
if return_debug_arrays:
return H_hat_2_time, H_hat_2_vals, N_2, u__1000 ** 2
else:
return H_hat_2_time, H_hat_2_vals
else:
return H_hat_2
def calculate_mountain_height_from_WRF(filename_SP, filename_PR,
filename_UU, filename_VV,
filename_TH, filename_QR,
filename_QV, filename_PH,
return_arrays=False, u_wind_mode='u', range_line_degrees=None,
reference_height=1000):
"""
calculates H_hat from WRF point output text files
    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line_degrees must not be None
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degrees (decimals) from north, clockwise, of the mountain range line.
:param filename_SP: fullpath filename of surface pressure file
:param filename_PR: fullpath filename of pressure file
:param filename_UU: fullpath filename of u wind file
:param filename_VV: fullpath filename of v wind file
:param filename_TH: fullpath filename of potential temperature file
:param filename_QR: fullpath filename of rain water mixing ratio file
:param filename_QV: fullpath filename of Water vapor mixing ratio file
:param filename_PH: fullpath filename of geopotential height file
:param return_arrays: if true, will return also brunt vaisalla and wind component squared
    :param u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line_degrees must not be None
:param range_line_degrees: if not None, u_wind_mode will automatically be set to normal_to_range
:param reference_height: mean height of mountain range
:return:
H_hat_2
"""
# load arrays from text
SP_array = genfromtxt(filename_SP, dtype=float, skip_header=1)[:,9] / 100 # hPa
PR_array = genfromtxt(filename_PR, dtype=float, skip_header=1)[:,1:] / 100 # hPa
UU_array = genfromtxt(filename_UU, dtype=float, skip_header=1)[:,1:]
VV_array = genfromtxt(filename_VV, dtype=float, skip_header=1)[:,1:]
TH_array = genfromtxt(filename_TH, dtype=float, skip_header=1)[:,1:]
QR_array = genfromtxt(filename_QR, dtype=float, skip_header=1)[:,1:]
QV_array = genfromtxt(filename_QV, dtype=float, skip_header=1)[:,1:]
Z_array = genfromtxt(filename_PH, dtype=float, skip_header=1)[:,1:] # already in meters
# calculate arrays
if range_line_degrees is not None:
WD_, WS_ = cart_to_polar(UU_array.flatten(), VV_array.flatten())
WD_delta = WD_ - range_line_degrees
range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
U_array = range_normal_component.reshape((UU_array.shape[0], UU_array.shape[1]))
else:
if u_wind_mode == 'u':
U_array = UU_array
else:
U_array = np.sqrt(UU_array ** 2 + VV_array ** 2)
T_array = calculate_temperature_from_potential_temperature(TH_array, PR_array)
RH_array = calculate_RH_from_QV_T_P(QV_array, T_array, PR_array*100)
q_s = calculate_saturation_mixing_ratio(PR_array, T_array)
e_ = gas_const_dry / gas_const_water
# create output array
H_hat_2 = np.zeros(PR_array.shape[0], dtype=float)
# loop tru time stamps
for r_ in range(PR_array.shape[0]):
p_progress_bar(r_, PR_array.shape[0])
# find surface pressure at this time stamp
surface_p = SP_array[r_]
# find pressure at 1000 meters
pressure_1000m = np.interp(reference_height, Z_array[r_, :], PR_array[r_, :])
pressure_1000m_index = np.argmin(np.abs(PR_array[r_, :] - pressure_1000m))
# find extrapolations
ql_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), QR_array[r_, :])
z__0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), Z_array[r_, :])
th_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), TH_array[r_, :])
qs_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), q_s[r_, :])
t__1000 = np.interp(reference_height, Z_array[r_, :], T_array[r_, :])
u__1000 = np.interp(reference_height, Z_array[r_, :], U_array[r_, :])
ql_1000 = np.interp(reference_height, Z_array[r_, :], QR_array[r_, :])
z__1000 = reference_height
th_1000 = np.interp(reference_height, Z_array[r_, :], TH_array[r_, :])
qs_1000 = np.interp(reference_height, Z_array[r_, :], q_s[r_, :])
# gradients
d_ln_TH = np.log(th_1000) - np.log(th_0)
d_z = z__1000 - z__0
d_q_s = qs_1000 - qs_0
d_q_w = (d_q_s) + (ql_1000 - ql_0)
# Brunt - Vaisala
if np.max(RH_array[r_, pressure_1000m_index:])>= 90:
# Moist
term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
(heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
term_2_3 = d_q_s / d_z
term_3 = d_q_w / d_z
N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
else:
# Dry
N_2 = gravity_ * d_ln_TH / d_z
# populate each time stamp
H_hat_2[r_] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
if return_arrays:
return H_hat_2, N_2, u__1000 ** 2
else:
return H_hat_2
def calculate_dewpoint_from_T_RH(T_, RH_):
"""
from Magnus formula, using Bolton's constants
:param T_: ambient temperature [Celsius]
    :param RH_: relative humidity [%]
    :return: Td_ dew point temperature [Celsius]
"""
a = 6.112
b = 17.67
c = 243.5
y_ = np.log(RH_/100) + ((b*T_)/(c+T_))
Td_ = (c * y_) / (b - y_)
return Td_
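# --- Editorial sanity check (added by the editor, not part of the original module) ---
# At 25 C and 60 % RH the Magnus formula gives a dew point of ~16.7 C.
def _example_dewpoint():
    print(calculate_dewpoint_from_T_RH(25.0, 60.0))  # ~16.7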
def calculate_RH_from_QV_T_P(arr_qvapor, arr_temp_K, arr_press_Pa):
    tv_ = 6.11 * e_constant**((2500000/461) * ((1/273) - (1/arr_temp_K)))  # saturation vapor pressure [hPa]
    pv_ = arr_qvapor * (arr_press_Pa/100) / (arr_qvapor + 0.622)  # actual vapor pressure [hPa]
return np.array(100 * pv_ / tv_)
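# --- Editorial sanity check (added by the editor, not part of the original module) ---
# 10 g/kg of water vapor at 293.15 K and 1000 hPa is roughly 66 % RH.
def _example_rh_from_qv():
    print(calculate_RH_from_QV_T_P(0.010, 293.15, 100000.0))  # ~66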
def calculate_profile_input_for_cluster_analysis_from_ERA5(p_profile, t_profile, td_profile, q_profile,
u_profile, v_profile, h_profile, surface_p):
"""
takes data from ERA5 for only one time stamp for all pressure levels from 250 to 1000 hPa
:param p_profile: in hPa
:param t_profile: in Celsius
:param td_profile: in Celsius
:param q_profile: in kg/kg
:param u_profile: in m/s
:param v_profile: in m/s
:param h_profile: in m
:param surface_p: in hPa
:return: surface_p, qv_, qu_, tw_, sh_, tt_
"""
# trim profiles from surface to top
# find which levels should be included
levels_total = 0
for i_ in range(p_profile.shape[0]):
if p_profile[i_] > surface_p:
break
levels_total += 1
####################################### find extrapolations
surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
surface_q = np.interp(np.log(surface_p), np.log(p_profile), q_profile)
surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
# create temp arrays
T_array = np.zeros(levels_total + 1, dtype=float)
Td_array = np.zeros(levels_total + 1, dtype=float)
Q_array = np.zeros(levels_total + 1, dtype=float)
U_array = np.zeros(levels_total + 1, dtype=float)
V_array = np.zeros(levels_total + 1, dtype=float)
H_array = np.zeros(levels_total + 1, dtype=float)
P_array = np.zeros(levels_total + 1, dtype=float)
T_array[:levels_total] = t_profile[:levels_total]
Td_array[:levels_total] = td_profile[:levels_total]
Q_array[:levels_total] = q_profile[:levels_total]
U_array[:levels_total] = u_profile[:levels_total]
V_array[:levels_total] = v_profile[:levels_total]
H_array[:levels_total] = h_profile[:levels_total]
P_array[:levels_total] = p_profile[:levels_total]
T_array[-1] = surface_t
Td_array[-1] = surface_td
Q_array[-1] = surface_q
U_array[-1] = surface_u
V_array[-1] = surface_v
H_array[-1] = surface_h
P_array[-1] = surface_p
######################################
r_850 = np.argmin(np.abs(P_array - 850))
r_500 = np.argmin(np.abs(P_array - 500))
dp_ = np.abs(np.gradient(P_array))
tt_ = (T_array[r_850] - (2 * T_array[r_500]) + Td_array[r_850])
qu_ = np.sum(Q_array * U_array * dp_) / gravity_
qv_ = np.sum(Q_array * V_array * dp_) / gravity_
tw_ = np.sum(Q_array * dp_) / gravity_
del_u = U_array[r_850] - U_array[r_500]
del_v = V_array[r_850] - V_array[r_500]
del_z = H_array[r_850] - H_array[r_500]
sh_ = ((del_u / del_z) ** 2 + (del_v / del_z) ** 2) ** 0.5
return surface_p, qv_, qu_, tw_, sh_, tt_
def barometric_equation(presb_pa, tempb_k, deltah_m, Gamma=-0.0065):
"""The barometric equation models the change in pressure with
height in the atmosphere.
INPUTS:
    presb_pa (Pa): The base pressure
tempb_k (K): The base temperature
deltah_m (m): The height differential between the base height and the
desired height
Gamma [=-0.0065]: The atmospheric lapse rate
OUTPUTS
pres (pa): Pressure at the requested level
REFERENCE:
http://en.wikipedia.org/wiki/Barometric_formula
"""
return presb_pa * \
(tempb_k/(tempb_k+Gamma*deltah_m))**(grav*m_a/(Rstar_a*Gamma))
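# --- usage sketch (hypothetical standard-atmosphere values, not from the source) ---
def _example_barometric_equation():
    # pressure 1500 m above a sea-level base at 101325 Pa and 288.15 K
    # (close to the ISA value of ~84.6 kPa with the default lapse rate)
    p_1500 = barometric_equation(101325.0, 288.15, 1500.0)
    # round trip with the inverse form below: recovers the 1500 m height
    h_1500 = barometric_equation_inv(0.0, 288.15, 101325.0, p_1500)
    print(p_1500, h_1500)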
def barometric_equation_inv(heightb_m, tempb_k, presb_pa,
prest_pa, Gamma=-0.0065):
"""The barometric equation models the change in pressure with height in
the atmosphere. This function returns altitude given
initial pressure and base altitude, and pressure change.
INPUTS:
    heightb_m (m): The base height
    presb_pa (Pa): The base pressure
    tempb_k (K): The base temperature
    prest_pa (Pa): The pressure at the desired height
Gamma [=-0.0065]: The atmospheric lapse rate
OUTPUTS
heightt_m
REFERENCE:
http://en.wikipedia.org/wiki/Barometric_formula
"""
return heightb_m + \
tempb_k * ((presb_pa/prest_pa)**(Rstar_a*Gamma/(grav*m_a))-1) / Gamma
def Theta(tempk, pres, pref=100000.):
"""Potential Temperature
INPUTS:
tempk (K)
pres (Pa)
pref: Reference pressure (default 100000 Pa)
OUTPUTS: Theta (K)
Source: Wikipedia
    Prints a warning if a pressure value below 2000 Pa is input, to ensure
that the units were input correctly.
"""
try:
minpres = min(pres)
except TypeError:
minpres = pres
if minpres < 2000:
print("WARNING: P<2000 Pa; did you input a value in hPa?")
return tempk * (pref/pres)**(Rs_da/Cp_da)
def TempK(theta, pres, pref=100000.):
"""Inverts Theta function."""
try:
minpres = min(pres)
except TypeError:
minpres = pres
if minpres < 2000:
print("WARNING: P<2000 Pa; did you input a value in hPa?")
return theta * (pres/pref)**(Rs_da/Cp_da)
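# --- usage sketch (hypothetical values, not from the source) ---
def _example_theta_roundtrip():
    # Theta and TempK are inverses: 290 K at 85000 Pa -> theta -> back to 290 K
    theta_ = Theta(290.0, 85000.0)
    print(theta_, TempK(theta_, 85000.0))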
def ThetaE(tempk, pres, e):
"""Calculate Equivalent Potential Temperature
for lowest model level (or surface)
INPUTS:
tempk: Temperature [K]
pres: Pressure [Pa]
e: Water vapour partial pressure [Pa]
OUTPUTS:
theta_e: equivalent potential temperature
References:
Eq. (9.40) from Holton (2004)
Eq. (22) from Bolton (1980)
<NAME> and <NAME> (2013), 'Land-Ocean Warming
Contrast over a Wide Range of Climates: Convective Quasi-Equilibrium
Theory and Idealized Simulations', J. Climate """
# tempc
tempc = tempk - degCtoK
# Calculate theta
theta = Theta(tempk, pres)
# T_lcl formula needs RH
es = VaporPressure(tempc)
RH = 100. * e / es
# theta_e needs q (water vapour mixing ratio)
qv = MixRatio(e, pres)
# Calculate the temp at the Lifting Condensation Level
T_lcl = ((tempk-55)*2840 / (2840-(np.log(RH/100)*(tempk-55)))) + 55
# print "T_lcl :%.3f"%T_lcl
# DEBUG STUFF ####
theta_l = tempk * \
(100000./(pres-e))**(Rs_da/Cp_da)*(tempk/T_lcl)**(0.28*qv)
# print "theta_L: %.3f"%theta_l
# Calculate ThetaE
theta_e = theta_l * np.exp((Lv * qv) / (Cp_da * T_lcl))
return theta_e
def ThetaE_Bolton(tempk, pres, e, pref=100000.):
"""Theta_E following Bolton (1980)
INPUTS:
tempk: Temperature [K]
pres: Pressure [Pa]
e: Water vapour partial pressure [Pa]
See http://en.wikipedia.org/wiki/Equivalent_potential_temperature
"""
# Preliminary:
T = tempk
qv = MixRatio(e, pres)
Td = DewPoint(e) + degCtoK
kappa_d = Rs_da / Cp_da
# Calculate TL (temp [K] at LCL):
TL = 56 + ((Td-56.)**-1+(np.log(T/Td)/800.))**(-1)
# print "TL: %.3f"%TL
# Calculate Theta_L:
thetaL = T * (pref/(pres-e))**kappa_d*(T/TL)**(0.28*qv)
# print "theta_L: %.3f"%thetaL
# put it all together to get ThetaE
thetaE = thetaL * np.exp((3036./TL-0.78)*qv*(1+0.448*qv))
return thetaE
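# --- usage sketch (hypothetical values, not from the source) ---
def _example_theta_e():
    # 300 K at 100000 Pa with 1500 Pa water vapour partial pressure;
    # the two theta-e formulations above should agree to within a few kelvin
    print(ThetaE(300.0, 100000.0, 1500.0))
    print(ThetaE_Bolton(300.0, 100000.0, 1500.0))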
def ThetaV(tempk, pres, e):
"""Virtual Potential Temperature
INPUTS
tempk (K)
pres (Pa)
    e: Water vapour pressure (Pa)
OUTPUTS
theta_v : Virtual potential temperature
"""
mixr = MixRatio(e, pres)
theta = Theta(tempk, pres)
return theta * (1+mixr/Epsilon) / (1+mixr)
def GammaW(tempk, pres):
"""Function to calculate the moist adiabatic lapse rate (deg C/Pa) based
on the environmental temperature and pressure.
INPUTS:
    tempk (K)
    pres (Pa)
RETURNS:
GammaW: The moist adiabatic lapse rate (Deg C/Pa)
REFERENCE:
http://glossary.ametsoc.org/wiki/Moist-adiabatic_lapse_rate
(Note that I multiply by 1/(grav*rho) to give MALR in deg/Pa)
"""
tempc = tempk-degCtoK
es = VaporPressure(tempc)
ws = MixRatio(es, pres)
# tempv=VirtualTempFromMixR(tempk,ws)
tempv = VirtualTemp(tempk, pres, es)
latent = Latentc(tempc)
Rho = pres / (Rs_da*tempv)
# This is the previous implementation:
# A=1.0+latent*ws/(Rs_da*tempk)
# B=1.0+Epsilon*latent*latent*ws/(Cp_da*Rs_da*tempk*tempk)
# Gamma=(A/B)/(Cp_da*Rho)
# This is algebraically identical but a little clearer:
A = -1. * (1.0+latent*ws/(Rs_da*tempk))
B = Rho * (Cp_da+Epsilon*latent*latent*ws/(Rs_da*tempk*tempk))
Gamma = A / B
return Gamma
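# --- usage sketch (hypothetical values, not from the source; uses the magnitude only,
# since the sign convention follows the rewritten form in GammaW above) ---
def _example_moist_lapse_rate():
    # GammaW returns deg C per Pa; multiplying its magnitude by rho*g (i.e. |dp/dz|
    # from hydrostatic balance) gives a lapse-rate magnitude in deg C per metre (~4-7 K/km)
    tempk_, pres_ = 285.0, 90000.0
    rho_ = pres_ / (Rs_da * tempk_)  # dry-air density as a rough estimate
    print(np.abs(GammaW(tempk_, pres_)) * rho_ * gravity_, 'K/m')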
def DensHumid(tempk, pres, e):
"""Density of moist air.
This is a bit more explicit and less confusing than the method below.
INPUTS:
tempk: Temperature (K)
pres: static pressure (Pa)
    e: water vapour partial pressure (Pa)
OUTPUTS:
rho_air (kg/m^3)
SOURCE: http://en.wikipedia.org/wiki/Density_of_air
"""
pres_da = pres - e
rho_da = pres_da / (Rs_da * tempk)
rho_wv = e/(Rs_v * tempk)
return rho_da + rho_wv
def Density(tempk, pres, mixr):
"""Density of moist air
INPUTS:
tempk: Temperature (K)
pres: static pressure (Pa)
mixr: mixing ratio (kg/kg)
OUTPUTS:
rho_air (kg/m^3)
"""
virtualT = VirtualTempFromMixR(tempk, mixr)
return pres / (Rs_da * virtualT)
def VirtualTemp(tempk, pres, e):
"""Virtual Temperature
INPUTS:
tempk: Temperature (K)
e: vapour pressure (Pa)
p: static pressure (Pa)
OUTPUTS:
tempv: Virtual temperature (K)
    SOURCE: Wikipedia."""
tempvk = tempk / (1-(e/pres)*(1-Epsilon))
return tempvk
def VirtualTempFromMixR(tempk, mixr):
"""Virtual Temperature
INPUTS:
tempk: Temperature (K)
mixr: Mixing Ratio (kg/kg)
OUTPUTS:
tempv: Virtual temperature (K)
    SOURCE: Wikipedia. This is a first-order approximation in the
    mixing ratio, valid for small mixing ratios.
    """
return tempk * (1.0+0.6*mixr)
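# --- usage sketch (hypothetical values, not from the source) ---
def _example_virtual_temperature():
    # the exact form and the linear approximation agree closely for small e/p
    tempk_, pres_, e_ = 300.0, 100000.0, 2000.0
    print(VirtualTemp(tempk_, pres_, e_))                    # ~302.3 K
    print(VirtualTempFromMixR(tempk_, MixRatio(e_, pres_)))  # ~302.3 K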
def Latentc(tempc):
"""Latent heat of condensation (vapourisation)
INPUTS:
tempc (C)
OUTPUTS:
L_w (J/kg)
SOURCE:
http://en.wikipedia.org/wiki/Latent_heat#Latent_heat_for_condensation_of_water
"""
return 1000 * (2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3)
def VaporPressure(tempc, phase="liquid"):
"""Water vapor pressure over liquid water or ice.
INPUTS:
tempc: (C) OR dwpt (C), if SATURATION vapour pressure is desired.
phase: ['liquid'],'ice'. If 'liquid', do simple dew point. If 'ice',
return saturation vapour pressure as follows:
Tc>=0: es = es_liquid
Tc <0: es = es_ice
RETURNS: e_sat (Pa)
SOURCE: http://cires.colorado.edu/~voemel/vp.html (#2:
CIMO guide (WMO 2008), modified to return values in Pa)
This formulation is chosen because of its appealing simplicity,
but it performs very well with respect to the reference forms
at temperatures above -40 C. At some point I'll implement Goff-Gratch
(from the same resource).
"""
over_liquid = 6.112 * np.exp(17.67*tempc/(tempc+243.12))*100.
over_ice = 6.112 * np.exp(22.46*tempc/(tempc+272.62))*100.
# return where(tempc<0,over_ice,over_liquid)
if phase == "liquid":
# return 6.112*exp(17.67*tempc/(tempc+243.12))*100.
return over_liquid
elif phase == "ice":
# return 6.112*exp(22.46*tempc/(tempc+272.62))*100.
return np.where(tempc < 0, over_ice, over_liquid)
else:
raise NotImplementedError
def SatVap(dwpt, phase="liquid"):
"""This function is deprecated, return ouput from VaporPres"""
print("WARNING: This function is deprecated, please use VaporPressure()" +
" instead, with dwpt as argument")
return VaporPressure(dwpt, phase)
def MixRatio(e, p):
"""Mixing ratio of water vapour
INPUTS
e (Pa) Water vapor pressure
p (Pa) Ambient pressure
RETURNS
    qv (kg kg^-1) Water vapor mixing ratio
"""
return Epsilon * e / (p - e)
def MixR2VaporPress(qv, p):
"""Return Vapor Pressure given Mixing Ratio and Pressure
INPUTS
    qv (kg kg^-1) Water vapor mixing ratio
p (Pa) Ambient pressure
RETURNS
e (Pa) Water vapor pressure
"""
return qv * p / (Epsilon + qv)
def DewPoint(e):
""" Use Bolton's (1980, MWR, p1047) formulae to find tdew.
INPUTS:
e (Pa) Water Vapor Pressure
OUTPUTS:
Td (C)
"""
ln_ratio = np.log(e/611.2)
Td = ((17.67-ln_ratio)*degCtoK+243.5*ln_ratio)/(17.67-ln_ratio)
return Td - degCtoK
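# --- usage sketch (hypothetical values, not from the source) ---
def _example_dewpoint_inverts_vapor_pressure():
    # DewPoint approximately inverts VaporPressure (the two use slightly
    # different Magnus constants, so the round trip is close but not exact)
    e_ = 1700.0  # Pa
    td_ = DewPoint(e_)
    print(td_, VaporPressure(td_))  # ~15.0 C, ~1703 Pa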
def WetBulb(tempc, RH):
"""Stull (2011): Wet-Bulb Temperature from Relative Humidity and Air
Temperature.
INPUTS:
tempc (C)
RH (%)
OUTPUTS:
tempwb (C)
"""
Tw = tempc * np.arctan(0.151977*(RH+8.313659)**0.5) + \
np.arctan(tempc+RH) - np.arctan(RH-1.676331) + \
0.00391838*RH**1.5*np.arctan(0.023101*RH) - \
4.686035
return Tw
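# --- usage sketch; 20 C at 50 % RH is the usual check case for Stull's fit ---
def _example_wet_bulb():
    print(WetBulb(20.0, 50.0))  # ~13.7 C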
# unit conversions
def convert_unit_and_save_data_ppb_ugm3(filename_, station_name):
# https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
# http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
parameters_unit_scaling = {'11' : 1.96, # O3
'10' : 1.25, # NO
'9' : 1.88, # NO2
'16' : 2.62, # SO2
'8' : 1.15} # CO
new_unit_name = '[$\mu$g/m$^3$]'
parameter_name_mod = {'9' : 'NO$_2$',
'11' : 'O$_3$',
'12' : 'PM$_1$$_0$',
'13' : 'PM$_2$$_.$$_5$',
'7' : 'CO$_2$',
'16' : 'SO$_2$',
}
# station_name = 'QF_01'
data_array = open_csv_file(filename_)
current_header = data_array[0,:]
new_header = np.array(current_header)
v_current = np.array(data_array[1:,:],dtype=float)
v_new = np.array(v_current)
for keys_ in parameters_unit_scaling.keys():
v_new[:, int(keys_)] = v_current[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
    # add station name prefix and clean up parameter names/units
for i_ in range(5,22):
if str(i_) in parameter_name_mod.keys():
parameter_name = parameter_name_mod[str(i_)]
else:
parameter_name = current_header[i_].split('_')[0]
if str(i_) in parameters_unit_scaling.keys():
parameter_unit = new_unit_name
else:
parameter_unit = current_header[i_].split('_')[1]
new_header[i_] = station_name + '_' + parameter_name + '_' + parameter_unit
data_array[1:,:] = v_new
data_array[0,:] = new_header
filename_new = filename_.split('\\')[-1].split('.')[0] + '_unit_converted.csv'
current_filename_without_path = filename_.split('\\')[-1]
current_filename_path = filename_[:-len(current_filename_without_path)]
numpy_save_txt(current_filename_path + filename_new, data_array)
print('done!')
def save_data_with_unit_conversion_ppb_ugm3(file_list_path):
file_list = sorted(glob.glob(str(file_list_path + '\\' + '*.csv')))
# https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
# http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
parameters_unit_scaling = {'12' : 1.96, # O3
'13' : 1.25, # NO
'14' : 1.88, # NO2
'15' : 2.62, # SO2
'16' : 1.15} # CO
parameters_new_names = ['YYYY', # 0
'MM', # 1
'DD', # 2
'HH', # 3
'mm', # 4
'Day of the week', # 5
'WD degrees', # 6
'WS m/s', # 7
'Temp Celsius', # 8
'RH %', # 9
'SR W/m2', # 10
'ATP mbar', # 11
'O3 ug/m3', # 12
'NO ug/m3', # 13
'NO2 ug/m3', # 14
'SO2 ug/m3', # 15
'CO mg/m3', # 16
'CO2 ppm', # 17
'PM10 ug/m3', # 18
'PM2.5 ug/m3', # 19
'THC ppm', # 20
'Rain mm', # 21
'Ox ppb', # 22
'NOx ppb'] # 23
for month_ in range(1,13):
print(month_)
filename_old = file_list[month_ -1]
data_array = open_csv_file(file_list[month_ -1])
v_ppb = np.array(data_array[1:,:],dtype=float)
v_ug_m3 = np.array(v_ppb)
for keys_ in parameters_unit_scaling.keys():
v_ug_m3[:, int(keys_)] = v_ppb[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
data_array[0, :] = parameters_new_names
data_array[1:,:] = v_ug_m3
filename_new = filename_old.split('\\')[-1].split('.')[0] + '_ugm3.csv'
numpy_save_txt(file_list_path + '\\' + filename_new, data_array)
print('done!')
def RH_to_abs_conc(arr_RH,arr_T):
a_ = 1-(373.15/arr_T)
c_1 = 13.3185
c_2 = -1.97
c_3 = -.6445
c_4 = -.1299
Po_H2O = 1013.25 * e_constant ** ((c_1 * (a_**1)) +
(c_2 * (a_**2)) +
(c_3 * (a_**3)) +
(c_4 * (a_**4)) ) # mbar
return (arr_RH * Po_H2O) / (100 * boltzmann_ * arr_T)
def Mixing_Ratio_to_molecules_per_cm3(arr_MR, ATP_mbar, Temp_C):
arr_temp = Temp_C + 273.15 # kelvin
arr_Molec_per_cm3 = arr_MR * ( ATP_mbar / ( boltzmann_ * arr_temp ) ) # molecules / cm3
return arr_Molec_per_cm3
def molecules_per_cm3_to_Mixing_Ratio(arr_Molec_per_cm3, ATP_mbar, Temp_C):
arr_temp = Temp_C + 273.15 # kelvin
arr_MR = (arr_Molec_per_cm3 * boltzmann_ * arr_temp) / ATP_mbar
return arr_MR
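# --- usage sketch (hypothetical values, not from the source) ---
def _example_mixing_ratio_roundtrip():
    # the two conversions above are exact inverses of each other
    mr_ = 50e-9  # e.g. 50 ppb expressed as a mixing ratio
    n_cm3 = Mixing_Ratio_to_molecules_per_cm3(mr_, 1013.25, 25.0)
    print(molecules_per_cm3_to_Mixing_Ratio(n_cm3, 1013.25, 25.0))  # recovers 50e-9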
def ws_knots_to_ms(arr_):
return arr_ * .514444
def ws_ms_to_knots(arr_):
return arr_ / .514444
def kelvin_to_celsius(arr_temp_k):
return arr_temp_k - 273.15
def celsius_to_kelvin(arr_temp_c):
return arr_temp_c + 273.15
# geo reference
def find_index_from_lat_lon(series_lat, series_lon, point_lat_list, point_lon_list):
lat_index_list = []
lon_index_list = []
# mask arrays
lat_m = series_lat
lon_m = series_lon
    if np.sum(lat_m) != np.sum(lat_m) or np.sum(lon_m) != np.sum(lon_m):  # sum != sum is true only if NaNs are present
lat_m = np.ma.masked_where(np.isnan(lat_m), lat_m)
lat_m = np.ma.masked_where(np.isinf(lat_m), lat_m)
lon_m = np.ma.masked_where(np.isnan(lon_m), lon_m)
lon_m = np.ma.masked_where(np.isinf(lon_m), lon_m)
if type(point_lat_list) == tuple or type(point_lat_list) == list:
for lat_ in point_lat_list:
lat_index_list.append(np.argmin(np.abs(lat_m - lat_)))
for lon_ in point_lon_list:
lon_index_list.append(np.argmin(np.abs(lon_m - lon_)))
else:
lat_index_list = np.argmin(np.abs(lat_m - point_lat_list))
lon_index_list = np.argmin(np.abs(lon_m - point_lon_list))
return lat_index_list, lon_index_list
def find_index_from_lat_lon_2D_arrays(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
dist_arr = ( lat_del_arr**2 + lon_del_arr**2 )**0.5
return find_min_index_2d_array(dist_arr)
def find_index_from_lat_lon_1D_arrays(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
dist_arr = ( lat_del_arr**2 + lon_del_arr**2 )**0.5
return find_min_index_1d_array(dist_arr)
def distance_array_lat_lon_2D_arrays_degrees(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
return ( lat_del_arr**2 + lon_del_arr**2 )**0.5
def meter_per_degrees(lat_point):
lat_mean_rad = np.deg2rad(np.abs(lat_point))
m_per_deg_lat = 111132.954 - 559.822 * np.cos(2 * lat_mean_rad) + 1.175 * np.cos(4 * lat_mean_rad)
m_per_deg_lon = 111132.954 * np.cos(lat_mean_rad)
return np.abs(m_per_deg_lat), np.abs(m_per_deg_lon)
def degrees_per_meter(lat_point):
m_per_deg_lat, m_per_deg_lon = meter_per_degrees(lat_point)
return 1/m_per_deg_lat, 1/m_per_deg_lon
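# --- usage sketch (hypothetical values, not from the source) ---
def _example_degrees_per_meter():
    # at 45 deg latitude, convert a 1 km north/east offset into degrees
    deg_per_m_lat, deg_per_m_lon = degrees_per_meter(45.0)
    print(1000 * deg_per_m_lat)  # ~0.0090 deg of latitude
    print(1000 * deg_per_m_lon)  # ~0.0127 deg of longitude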
def distance_array_lat_lon_2D_arrays_degress_to_meters(lat_arr, lon_arr, point_lat, point_lon):
m_per_deg_lat, m_per_deg_lon = meter_per_degrees(np.nanmean(lat_arr))
lat_del_arr_m = (lat_arr - point_lat) * m_per_deg_lat
lon_del_arr_m = (lon_arr - point_lon) * m_per_deg_lon
return ( lat_del_arr_m**2 + lon_del_arr_m**2 )**0.5
def distance_between_to_points_in_meters(point_1_latlon, point_2_latlon):
latMid = (point_1_latlon[0] + point_2_latlon[0]) / 2
m_per_deg_lat, m_per_deg_lon = meter_per_degrees(latMid)
del_lat = (point_1_latlon[0] - point_2_latlon[0]) * m_per_deg_lat
del_lon = (point_1_latlon[1] - point_2_latlon[1]) * m_per_deg_lon
return ((del_lat**2) + (del_lon**2))**0.5
# Data Loading
def numpy_load_txt(filename_, delimiter_=",", format_=float, skip_head=0):
return genfromtxt(filename_, delimiter=delimiter_, dtype=format_, skip_header=skip_head)
def open_csv_file(filename_, delimiter=',', skip_head=0, dtype='<U32'):
# load data
return np.array(genfromtxt(filename_, delimiter=delimiter, dtype=dtype, skip_header=skip_head))
def load_time_columns(filename_):
## user defined variables
day_column_number = 2
month_column_number = 1
year_column_number = 0
hour_column_number = 3
minute_column_number = 4
time_header = 'Time' #defining time header
data_array = open_csv_file(filename_)
# define arrays
values_str = data_array[1:,5:]
values_ = np.zeros((values_str.shape[0],values_str.shape[1]),dtype=float)
for r_ in range(values_.shape[0]):
for c_ in range(values_.shape[1]):
try:
values_[r_,c_] = float(values_str[r_,c_])
except:
values_[r_,c_] = np.nan
header_ = data_array[0 ,1:]
# defining time arrays
time_days = np.zeros(data_array.shape[0] - 1, dtype=float)
time_month = np.zeros(data_array.shape[0] - 1, dtype=int)
time_weekday = np.zeros(data_array.shape[0] - 1, dtype=int)
time_hour = np.zeros(data_array.shape[0] - 1)
for r_ in range(data_array.shape[0] - 1):
time_days[r_] = mdates.date2num(datetime.datetime(
int(float(data_array[r_+1,year_column_number])),
int(float(data_array[r_+1,month_column_number])),
int(float(data_array[r_+1,day_column_number])),
int(float(data_array[r_+1,hour_column_number])),
int(float(data_array[r_+1,minute_column_number]))))
time_month[r_] = int(float(data_array[r_+1,month_column_number]))
time_weekday[r_] = datetime.datetime.weekday(mdates.num2date(time_days[r_]))
time_hour[r_] = float(data_array[r_+1,hour_column_number]) + (float(data_array[r_+1,minute_column_number]) / 60)
# compile names
header_[0] = time_header
header_[1] = 'Month'
header_[2] = 'Day of week'
header_[3] = 'Hour of day'
# compile values
values_ = np.column_stack((time_days, time_month, time_weekday, time_hour, values_))
return header_, values_
def load_object(filename):
with open(filename, 'rb') as input_object:
object_ = pickle.load(input_object)
return object_
def read_one_line_from_text_file(filename_, line_number):
    line_str = None  # stays None if the file has fewer lines than line_number
    file_ = open(filename_)
    for i, line in enumerate(file_):
        if i == line_number:
            line_str = line
elif i > line_number:
break
file_.close()
return line_str
# data saving/output
def save_time_variable_as_csv(output_filename, var_name, time_in_secs, var_values, time_format_output='%Y%m%d%H%M%S'):
out_file = open(output_filename, 'w')
# write header
out_file.write(time_format_output)
out_file.write(',')
out_file.write(var_name)
out_file.write('\n')
for r_ in range(time_in_secs.shape[0]):
p_progress_bar(r_, time_in_secs.shape[0])
out_file.write(time_seconds_to_str(time_in_secs[r_], time_format_output))
out_file.write(',' + str(var_values[r_]))
out_file.write('\n')
out_file.close()
def numpy_save_txt(filename_, array_, delimiter_=",", format_='%s'):
np.savetxt(filename_, array_, delimiter=delimiter_, fmt=format_)
def save_array_to_disk(header_with_units, time_in_seconds, values_in_floats, filename):
#
if len(values_in_floats.shape) == 1:
header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm', header_with_units]
else:
header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
for parameter_ in header_with_units:
header_to_print.append(parameter_)
# create values block
T_ = time_seconds_to_5C_array(time_in_seconds)
P_ = np.column_stack((T_, values_in_floats))
# change type to str
P_str = np.array(P_, dtype='<U32')
# join header with values
P_final = np.row_stack((header_to_print, P_str))
# save to hard drive
numpy_save_txt(filename, P_final)
print('final data saved to: ' + filename)
def save_HVF(header_, values_, filename):
# check if all shapes match
if len(header_) != values_.shape[1]:
print('shape of header is not compatible with shape of values')
return
time_in_seconds = mdates.num2epoch(values_[:, 0])
header_with_units = header_[2:]
values_in_floats = values_[:, 2:]
header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
for parameter_ in header_with_units:
header_to_print.append(parameter_)
# create values block
T_ = np.zeros((time_in_seconds.shape[0], 5), dtype='<U32')
for r_ in range(time_in_seconds.shape[0]):
if time_in_seconds[r_] == time_in_seconds[r_]:
T_[r_] = time.strftime("%Y,%m,%d,%H,%M", time.gmtime(time_in_seconds[r_])).split(',')
P_ = np.column_stack((T_, values_in_floats))
# change type to str
P_str = np.array(P_, dtype='<U32')
# join header with values
P_final = np.row_stack((header_to_print, P_str))
# save to hard drive
numpy_save_txt(filename, P_final)
print('final data saved to: ' + filename)
def save_simple_array_to_disk(header_list, values_array, filename_):
# change type to str
values_str = np.array(values_array, dtype='<U32')
# join header with values
array_final = np.row_stack((header_list, values_str))
# save to hard drive
numpy_save_txt(filename_, array_final)
print('final data saved to: ' + filename_)
def save_array_as_is(array_, filename_):
np.savetxt(filename_, array_, delimiter=",", fmt='%s')
def save_object(obj, filename):
with open(filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
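# --- usage sketch (hypothetical filename, not from the source) ---
def _example_object_roundtrip():
    # save_object/load_object pickle arbitrary python objects to/from disk
    obj_ = {'station': 'demo', 'values': np.arange(5)}
    save_object(obj_, 'C:\\_output\\demo_object.pkl')
    print(load_object('C:\\_output\\demo_object.pkl'))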
# png data handling
def store_array_to_png(array_, filename_out):
"""
    This function saves an array to a png file while keeping as much accuracy as possible with the lowest memory usage
:param array_: numpy array
:param filename_out: string with full path
:return: none
"""
# shape
rows_ = array_.shape[0]
columns_ = array_.shape[1]
# nan layer
array_nan = np.zeros((rows_, columns_), dtype='uint8')
array_nan[array_ != array_] = 100
# replace nans
array_[array_ != array_] = 0
# convert to all positive
array_positive = np.abs(array_)
# sign layer
array_sign = np.zeros((rows_, columns_), dtype='uint8')
array_sign[array_ >= 0] = 100
# zeros array
array_zeros = np.zeros((rows_, columns_), dtype='uint8')
array_zeros[array_positive != 0] = 1
# sub 1 array
array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
array_sub1[array_positive<1] = 1
array_sub1 = array_sub1 * array_zeros
# power array
exp_ = np.array(np.log10(array_positive), dtype=int)
exp_[array_zeros==0] = 0
# integral array
array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
# array_layer_1
array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
# array_layer_2
array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
- np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
dtype='uint8')
array_layer_2 = array_layer_2 + array_nan
# power sign layer
exp_ = exp_ - array_sub1
array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
array_power_sign[exp_ >= 0] = 100
# array_layer_3
array_layer_3 = np.abs(exp_) + array_power_sign
# initialize out array
out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
# dump into out array
out_array[:, :, 0] = array_layer_1
out_array[:, :, 1] = array_layer_2
out_array[:, :, 2] = array_layer_3
img_arr = PIL_Image.fromarray(out_array)
img_arr.save(filename_out)
def read_png_to_array(filename_):
"""
    This function converts png files created by "store_array_to_png" back to numpy arrays
:param filename_: string with full path name to png file created by store_array_to_png
:return: numpy array
"""
# read image into array
img_arr = np.array(PIL_Image.open(filename_))
# shape
rows_ = img_arr.shape[0]
columns_ = img_arr.shape[1]
# nan array
nan_array = np.zeros((rows_, columns_), dtype='uint8')
nan_array[img_arr[:,:,1] >= 100] = 1
# power array
power_array_magnitude = ((img_arr[:,:,2]/100) - np.array(img_arr[:,:,2]/100, dtype='uint8') ) * 100
sign_array = np.zeros((rows_, columns_)) - 1
sign_array[img_arr[:,:,2] >= 100] = 1
power_array = power_array_magnitude * sign_array
# sign array
sign_array = np.array(img_arr[:,:,0]/100, dtype=int)
sign_array[sign_array == 0] = -1
# unit array
unit_array = np.array(img_arr[:,:,0]/10, dtype='uint8') - (np.array(img_arr[:,:,0]/100, dtype='uint8') * 10)
# decimal array
decimal_array_1 = (img_arr[:,:,0]/10) - np.array(img_arr[:,:,0]/10, dtype='uint8')
decimal_array_2 = ((img_arr[:,:,1]/100) - np.array(img_arr[:,:,1]/100, dtype='uint8') ) / 10
# compute out array
out_array = (sign_array * (unit_array + decimal_array_1 + decimal_array_2)) * 10 ** power_array
# flag nans
out_array[nan_array==1]=np.nan
return out_array
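# --- usage sketch (hypothetical filename, not from the source) ---
def _example_png_roundtrip():
    # store_array_to_png encodes sign, a few significant figures of the mantissa,
    # the exponent, and nan positions into the three png channels;
    # read_png_to_array reverses the encoding. note that store_array_to_png
    # replaces nans with 0 in the input array in place.
    arr_ = np.array([[1.23e-4, -5.67e2], [np.nan, 42.0]])
    store_array_to_png(arr_, 'C:\\_output\\demo_array.png')
    print(read_png_to_array('C:\\_output\\demo_array.png'))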
# satellite data load
def load_OMI_NO2_monthly_data(filename_):
# # [molec./cm-2]
# filename_ = 'C:\\_input\\no2_201601.grd'
# arr_NO2, lat_arr_NO2, lon_arr_NO2 = load_OMI_NO2_monthly_data(filename_)
# [440: -820, 1650: 1960]
data_array = genfromtxt(filename_, dtype=float, skip_header=7)
file_object = open(filename_,mode='r')
ncols = int(file_object.readline().split()[-1])
nrows = int(file_object.readline().split()[-1])
xllcorner = float(file_object.readline().split()[-1])
yllcorner = float(file_object.readline().split()[-1])
cellsize = float(file_object.readline().split()[-1])
nodata_value = float(file_object.readline().split()[-1])
# version = file_object.readline().split()[-1]
file_object.close()
lat_arr = np.zeros((nrows, ncols), dtype=float)
lon_arr = np.zeros((nrows, ncols), dtype=float)
lat_series = np.linspace(yllcorner + (cellsize * nrows), yllcorner, nrows)
lon_series = np.linspace(xllcorner, xllcorner + (cellsize * ncols), ncols)
for r_ in range(nrows):
lon_arr[r_, :] = lon_series
for c_ in range(ncols):
lat_arr[:, c_] = lat_series
data_array[data_array==nodata_value] = np.nan
data_array = data_array * 1e13
return data_array[1:-1,:], lat_arr[1:-1,:], lon_arr[1:-1,:]
def load_OMI_HCHO_monthly_data(filename_):
# # [molec./cm-2]
# filename_ = 'C:\\_input\\OMIH2CO_Grid_720x1440_201601.dat'
# arr_HCHO, lat_arr_HCHO, lon_arr_HCHO = load_OMI_HCHO_monthly_data(filename_)
# [220: -410, 825: 980]
data_array = genfromtxt(filename_, dtype=float, skip_header=7)
ncols = 1440
nrows = 720
xllcorner = -180
yllcorner = -90
cellsize = 0.25
lat_arr = np.zeros((nrows, ncols), dtype=float)
lon_arr = np.zeros((nrows, ncols), dtype=float)
lat_series = np.linspace(yllcorner + (cellsize * nrows), yllcorner, nrows)
lon_series = np.linspace(xllcorner, xllcorner + (cellsize * ncols), ncols)
for r_ in range(nrows):
lon_arr[r_, :] = lon_series
for c_ in range(ncols):
lat_arr[:, c_] = lat_series
data_array = data_array * 1e15
return data_array[1:-1,:], lat_arr[1:-1,:], lon_arr[1:-1,:]
def download_HIM8_AUS_ch3_500m(YYYYmmddHHMM_str):
url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
YYYYmmddHHMM_str[:4] + \
'/' + \
YYYYmmddHHMM_str[4:6] + \
'/' + \
YYYYmmddHHMM_str[6:8] + \
'/' + \
YYYYmmddHHMM_str[8:12] + \
'/' + \
YYYYmmddHHMM_str + '00' \
'-P1S-ABOM_BRF_B03-PRJ_GEOS141_500-HIMAWARI8-AHI.nc'
f_ = nc.Dataset(url_)
r_1 = 13194
r_2 = 19491
c_1 = 4442
c_2 = 14076
return f_.variables['channel_0003_brf'][0, r_1:r_2, c_1:c_2]
def download_HIM8_AUS_2000m(YYYYmmddHHMM_str, channel_number_str, print_=True):
url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
'/' + YYYYmmddHHMM_str[8:12] + \
'/' + YYYYmmddHHMM_str + '00' + \
'-P1S-ABOM_OBS_' \
'B' + channel_number_str + \
'-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
if print_: print('downloading HIM_8', YYYYmmddHHMM_str, channel_number_str)
f_ = nc.Dataset(url_)
r_1 = 3298
r_2 = 4873
c_1 = 1110
c_2 = 3519
variable_name = ''
for var_key in f_.variables.keys():
if len(var_key.split('channel')) > 1:
variable_name = var_key
break
return f_.variables[variable_name][0, r_1:r_2, c_1:c_2]
def download_HIM8_2000m(YYYYmmddHHMM_str, channel_number_str):
url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
'/' + YYYYmmddHHMM_str[8:12] + \
'/' + YYYYmmddHHMM_str + '00' + \
'-P1S-ABOM_OBS_' \
'B' + channel_number_str + \
'-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
f_ = nc.Dataset(url_)
variable_name = ''
for var_key in f_.variables.keys():
if len(var_key.split('channel')) > 1:
variable_name = var_key
break
print('downloading variable:', variable_name)
return f_.variables[variable_name][0, :,:]
def download_HIM8_AUS_truecolor_2000m(YYYYmmddHHMM_str):
H8_b = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '01')
H8_g = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '02')
H8_r = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '03')
img_ = np.zeros((H8_b.shape[0], H8_b.shape[1], 3), dtype='uint8')
img_[:, :, 0] = H8_r * 170
img_[:, :, 1] = H8_g * 170
img_[:, :, 2] = H8_b * 170
return img_
def download_HIM8_truecolor_2000m(YYYYmmddHHMM_str):
H8_b = download_HIM8_2000m(YYYYmmddHHMM_str, '01')
H8_g = download_HIM8_2000m(YYYYmmddHHMM_str, '02')
H8_r = download_HIM8_2000m(YYYYmmddHHMM_str, '03')
img_ = np.zeros((H8_b.shape[0], H8_b.shape[1], 3), dtype='uint8')
img_[:, :, 0] = H8_r * 170
img_[:, :, 1] = H8_g * 170
img_[:, :, 2] = H8_b * 170
return img_
def download_lat_lon_arrays_HIM8_500():
url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/' \
'20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_500-HIMAWARI8-AHI.nc'
lat_ = download_big_nc_array_in_parts(url_, 'lat')
lon_ = download_big_nc_array_in_parts(url_, 'lon')
lat_[lat_ > 360] = np.nan
lon_[lon_ > 360] = np.nan
return lat_, lon_
def download_lat_lon_arrays_HIM8_2000():
url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/' \
'20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
lat_ = download_big_nc_array_in_parts(url_, 'lat')
lon_ = download_big_nc_array_in_parts(url_, 'lon')
lat_[lat_ > 360] = np.nan
lon_[lon_ > 360] = np.nan
return lat_, lon_
def download_big_nc_array_in_parts(url_, variable_name, parts_=4):
f_ = nc.Dataset(url_)
var_shape = f_.variables[variable_name].shape
print('downloading variable', variable_name, 'with shape:', var_shape)
if len(var_shape) == 0:
print('ERROR! variable is not an array')
return None
elif len(var_shape) == 1:
if var_shape[0] == 1:
print('ERROR! variable is a scalar')
return None
else:
rows_per_part = int(var_shape[0] / parts_)
if rows_per_part == 0:
print('ERROR! variable size is too small to be divided, should be downloaded directly')
return None
else:
output_array = np.zeros(var_shape[0])
for part_ in range(parts_ - 1):
output_array[int(part_*rows_per_part):int((part_+1)*rows_per_part)] =\
f_.variables[variable_name][int(part_*rows_per_part):int((part_+1)*rows_per_part)]
output_array[int((parts_ -1)*rows_per_part):] = \
f_.variables[variable_name][int((parts_ -1)*rows_per_part):]
return output_array
elif len(var_shape) == 2:
rows_per_part = int(var_shape[1] / parts_)
if rows_per_part == 0:
print('ERROR! variable size is too small to be divided, should be downloaded directly')
return None
else:
output_array = np.zeros((var_shape[0],var_shape[1]))
for part_ in range(parts_ - 1):
output_array[:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part)] = \
f_.variables[variable_name][:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part)]
output_array[:,int((parts_ - 1) * rows_per_part):] = \
f_.variables[variable_name][:,int((parts_ - 1) * rows_per_part):]
return output_array
elif len(var_shape) == 3:
rows_per_part = int(var_shape[1] / parts_)
if rows_per_part == 0:
print('ERROR! variable size is too small to be divided, should be downloaded directly')
return None
else:
output_array = np.zeros((var_shape[0],var_shape[1],var_shape[2]))
for part_ in range(parts_ - 1):
output_array[:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part),:] = \
f_.variables[variable_name][:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part),:]
output_array[:,int((parts_ - 1) * rows_per_part):,:] = \
f_.variables[variable_name][:,int((parts_ - 1) * rows_per_part):,:]
return output_array
elif len(var_shape) == 4:
rows_per_part = int(var_shape[1] / parts_)
if rows_per_part == 0:
print('ERROR! variable size is too small to be divided, should be downloaded directly')
return None
else:
output_array = np.zeros((var_shape[0],var_shape[1],var_shape[2],var_shape[3]))
for part_ in range(parts_ - 1):
output_array[:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part),:,:] = \
f_.variables[variable_name][:,int(part_ * rows_per_part):int((part_ + 1) * rows_per_part),:,:]
output_array[:,int((parts_ - 1) * rows_per_part):,:,:] = \
f_.variables[variable_name][:,int((parts_ - 1) * rows_per_part):,:,:]
return output_array
elif len(var_shape) > 4:
        print('ERROR! variable has more than 4 dimensions, not implemented for this many dimensions')
return None
def get_himawari8_2000m_NCI(YYYYmmddHHMM_str, channel_number, output_format='png',
output_path='/g/k10/la6753/data/', row_start=0, row_stop=5500, col_start=0,
col_stop=5500):
"""
gets array from himawari-8 netcdf files and extracts only the indicated channel at the indicated time. saves to output_path
:param YYYYmmddHHMM_str: string with the time in four digits for year, two digits for months...
:param channel_number: int or float with the number of the channel ('01'-'16')
    :param output_format: string with either 'png' or 'numpy'. If 'png' the array will be saved using store_array_to_png, otherwise numpy.save will be used
:param output_path: string with the path, or full filename to be used to save the file
:param row_start: int with the row number to start the crop
:param row_stop: int with the row number to stop the crop
    :param col_start: int with the column number to start the crop
    :param col_stop: int with the column number to stop the crop
:return: None
"""
channel_number_str = str(int(channel_number)).zfill(2)
filename_ = '/g/data/rr5/satellite/obs/himawari8/FLDK/' + \
YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
'/' + YYYYmmddHHMM_str[8:12] + \
'/' + YYYYmmddHHMM_str + '00' + \
'-P1S-ABOM_OBS_' \
'B' + channel_number_str + \
'-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
if os.path.exists(filename_):
f_ = nc.Dataset(filename_)
variable_name = ''
for var_key in f_.variables.keys():
if len(var_key.split('channel')) > 1:
variable_name = var_key
break
array_ = f_.variables[variable_name][0, row_start:row_stop, col_start:col_stop]
if output_path[-1] == '/' or output_path[-1] == '\\':
if output_format == 'png':
output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.png'
else:
output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.npy'
else:
output_filename = output_path
if output_format == 'png':
store_array_to_png(array_, output_filename)
else:
np.save(output_filename, array_)
else:
print('File not available for time stamp:', YYYYmmddHHMM_str)
# ERA5
def create_virtual_sondes_from_ERA5(time_stamp_sec, lat_lon_tuple, era5_file_levels_ncFile, era5_file_surface_ncFile,
max_time_delta_sec=21600, show_prints=True):
close_level_file=False
close_surface_file=False
if type(era5_file_levels_ncFile) == str:
era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
close_level_file = True
else:
era5_file_levels = era5_file_levels_ncFile
if type(era5_file_surface_ncFile) == str:
era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
close_surface_file = True
else:
era5_file_surface = era5_file_surface_ncFile
time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
time_era5_surface_sec = time_era5_to_seconds(era5_file_surface.variables['time'][:])
r_era5_levels_1 = time_to_row_sec(time_era5_levels_sec, time_stamp_sec)
r_era5_surface_1 = time_to_row_sec(time_era5_surface_sec, time_stamp_sec)
if np.abs(time_era5_levels_sec[r_era5_levels_1] - time_stamp_sec) > max_time_delta_sec:
        if show_prints: print('error: time gap is too large')
return None
# find row and column for the lat lon
lat_index, lon_index = find_index_from_lat_lon(era5_file_levels.variables['latitude'][:].data,
era5_file_levels.variables['longitude'][:].data,
lat_lon_tuple[0], lat_lon_tuple[1])
if show_prints: print('creating input arrays')
t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_era5_levels_1, :, lat_index, lon_index].data)
if show_prints: print('created t_array')
td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_era5_levels_1, :, lat_index, lon_index].data)
if show_prints: print('created Td_array')
h_profile = era5_file_levels.variables['z'][r_era5_levels_1, :, lat_index, lon_index].data / gravity_
if show_prints: print('created z_array')
u_profile = era5_file_levels.variables['u'][r_era5_levels_1, :, lat_index, lon_index].data
if show_prints: print('created u_array')
v_profile = era5_file_levels.variables['v'][r_era5_levels_1, :, lat_index, lon_index].data
if show_prints: print('created v_array')
p_profile = era5_file_levels.variables['level'][:].data # hPa
if show_prints: print('created p_array')
surface_p = era5_file_surface.variables['sp'][r_era5_surface_1, lat_index, lon_index] / 100 # / 100 to convert Pa to hPa
if show_prints: print('created sp_array')
# trim profiles from surface to top
# find which levels should be included
levels_total = 0
for i_ in range(p_profile.shape[0]):
if p_profile[i_] > surface_p:
break
levels_total += 1
####################################### find extrapolations
surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
# create temp arrays
T_array = np.zeros(levels_total + 1, dtype=float)
Td_array = np.zeros(levels_total + 1, dtype=float)
Q_array = np.zeros(levels_total + 1, dtype=float)
U_array = np.zeros(levels_total + 1, dtype=float)
V_array = np.zeros(levels_total + 1, dtype=float)
H_array = np.zeros(levels_total + 1, dtype=float)
P_array = np.zeros(levels_total + 1, dtype=float)
T_array[:levels_total] = t_profile[:levels_total]
Td_array[:levels_total] = td_profile[:levels_total]
U_array[:levels_total] = u_profile[:levels_total]
V_array[:levels_total] = v_profile[:levels_total]
H_array[:levels_total] = h_profile[:levels_total]
P_array[:levels_total] = p_profile[:levels_total]
T_array[-1] = surface_t
Td_array[-1] = surface_td
U_array[-1] = surface_u
V_array[-1] = surface_v
H_array[-1] = surface_h
P_array[-1] = surface_p
if close_level_file:
era5_file_levels.close()
if close_surface_file:
era5_file_surface.close()
return P_array, H_array, T_array, Td_array, U_array, V_array
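# --- usage sketch (hypothetical filenames and coordinates, not from the source) ---
def _example_virtual_sonde():
    # build a pseudo-sounding for one time and place from ERA5 level and surface files
    sonde_ = create_virtual_sondes_from_ERA5(time_str_to_seconds('201801150600', '%Y%m%d%H%M'),
                                             (-35.3, 149.2),
                                             'C:\\_input\\era5_levels.nc',
                                             'C:\\_input\\era5_surface.nc')
    if sonde_ is not None:
        P_array, H_array, T_array, Td_array, U_array, V_array = sonde_
        print(P_array.shape, T_array[-1])  # surface values are in the last element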
def era5_get_surface_interpolated_vars(era5_file_levels_ncFile, era5_file_surface_ncFile, show_prints=True,
time_start_str_YYYYmmDDHHMM=None, time_stop_str_YYYYmmDDHHMM=None):
close_level_file=False
close_surface_file=False
if type(era5_file_levels_ncFile) == str:
era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
close_level_file = True
else:
era5_file_levels = era5_file_levels_ncFile
if type(era5_file_surface_ncFile) == str:
era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
close_surface_file = True
else:
era5_file_surface = era5_file_surface_ncFile
time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
# trim time
r_1 = 0
r_2 = -1
if time_start_str_YYYYmmDDHHMM is not None:
r_1 = time_to_row_str(time_era5_levels_sec, time_start_str_YYYYmmDDHHMM)
if time_stop_str_YYYYmmDDHHMM is not None:
r_2 = time_to_row_str(time_era5_levels_sec, time_stop_str_YYYYmmDDHHMM)
time_era5_sec = time_era5_levels_sec[r_1:r_2]
if show_prints: print('creating input arrays')
t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_1:r_2, 10:, :, :].data)
if show_prints: print('created t_array')
td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_1:r_2, 10:, :, :].data)
if show_prints: print('created Td_array')
h_profile = era5_file_levels.variables['z'][r_1:r_2, 10:, :, :].data / gravity_
if show_prints: print('created z_array')
u_profile = era5_file_levels.variables['u'][r_1:r_2, 10:, :, :].data
if show_prints: print('created u_array')
v_profile = era5_file_levels.variables['v'][r_1:r_2, 10:, :, :].data
if show_prints: print('created v_array')
p_profile = era5_file_levels.variables['level'][10:].data # hPa
if show_prints: print('created p_array')
surface_p = era5_file_surface.variables['sp'][r_1:r_2, :, :] / 100 # / 100 to convert Pa to hPa
if show_prints: print('created sp_array')
q_profile = era5_file_levels.variables['q'][r_1:r_2, 10:, :, :].data
if show_prints: print('created q_array')
####################################### find extrapolations
surface_t = np.zeros((surface_p.shape), dtype=float)
surface_td = np.zeros((surface_p.shape), dtype=float)
surface_u = np.zeros((surface_p.shape), dtype=float)
surface_v = np.zeros((surface_p.shape), dtype=float)
surface_h = np.zeros((surface_p.shape), dtype=float)
surface_q = np.zeros((surface_p.shape), dtype=float)
if show_prints: print('starting interpolation of every point in time')
for r_ in range(time_era5_sec.shape[0]):
p_progress_bar(r_,time_era5_sec.shape[0])
for lat_ in range(surface_p.shape[1]):
for lon_ in range(surface_p.shape[2]):
surface_t [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), t_profile [r_,:,lat_,lon_])
surface_td[r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), td_profile[r_,:,lat_,lon_])
surface_u [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), u_profile [r_,:,lat_,lon_])
surface_v [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), v_profile [r_,:,lat_,lon_])
surface_h [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), h_profile [r_,:,lat_,lon_])
surface_q [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
np.log(p_profile), q_profile [r_,:,lat_,lon_])
if close_level_file:
era5_file_levels.close()
if close_surface_file:
era5_file_surface.close()
return surface_t, surface_td, surface_u, surface_v, surface_h, surface_q, time_era5_sec
# HYSPLIT
def hysplit_load_freq_endpoints(filename_, number_of_hours):
file_obj = open(filename_,'r')
line_list = file_obj.readlines()
file_obj.close()
file_traj_list = []
traj_number = -1
for line_inx, line_str in enumerate(line_list):
if line_str == ' 1 PRESSURE\n':
traj_number += 1
for r_ in range(number_of_hours + 1):
new_line_list = line_list[line_inx + r_ + 1].split()
new_line_list.append(traj_number)
file_traj_list.append(new_line_list)
arr_ = np.zeros((len(file_traj_list),12), dtype=float)
for r_ in range(len(file_traj_list)):
for c_ in range(12):
arr_[r_,c_] = file_traj_list[r_][c_ + 2]
return arr_
def hysplit_load_freq_endpoints_all(file_list):
file_traj_list = []
for filename_ in file_list:
file_obj = open(filename_,'r')
line_list = file_obj.readlines()
file_obj.close()
for line_inx, line_str in enumerate(line_list):
if line_str == ' 1 PRESSURE\n':
for r_ in range(25):
file_traj_list.append(line_list[line_inx + r_ + 1].split())
arr_ = np.zeros((len(file_traj_list),11), dtype=float)
for r_ in range(len(file_traj_list)):
for c_ in range(11):
arr_[r_,c_] = file_traj_list[r_][c_ + 2]
return arr_
def calculate_mean_time(file_list, lat_tuple, lon_tuple):
# file_list_irn = sorted(glob.glob(str('E:\\hysplit_IRN\\' + '*.txt')))
# file_list_uae = sorted(glob.glob(str('E:\\hysplit_UAE\\' + '*.txt')))
# lat_tuple = tuple((24.889974, 26.201930))
# lon_tuple = tuple((50.727086, 51.729315))
hit_counter_list = []
total_counter_list = []
# month_list_list = []
month_mean_time = []
month_std_time = []
month_probability_list = []
for filename_ in file_list:
arr_ = hysplit_load_freq_endpoints(filename_, 24)
hit_counter = 0
hit_age = []
total_number_of_trajs = int(np.max(arr_[:,-1]))
for traj_ in range(total_number_of_trajs + 1):
for r_ in range(arr_.shape[0]):
if arr_[r_,-1] == traj_:
if lat_tuple[0] < arr_[r_, 7] < lat_tuple[1] and lon_tuple[0] < arr_[r_, 8] < lon_tuple[1]:
hit_counter += 1
hit_age.append(arr_[r_, 6])
break
hit_counter_list.append(hit_counter)
total_counter_list.append(total_number_of_trajs)
month_probability_list.append(100*hit_counter/total_number_of_trajs)
# month_list_list.append(hit_age)
month_mean_time.append(np.mean(hit_age))
month_std_time.append(np.std(hit_age))
return month_probability_list, np.array(month_mean_time), hit_counter_list, total_counter_list, np.array(month_std_time)
# BOM
def Lidar_compile_and_convert_txt_to_dict(main_folder_path):
# main_folder_path = 'D:\Data\LIDAR Data\\'
# create the full file list
filename_list = []
path_folders_list = next(os.walk(main_folder_path))[1]
for sub_folder in path_folders_list:
if sub_folder[0] == '2':
path_sub_folders_list = next(os.walk(main_folder_path + sub_folder + '\\'))[1]
for sub_sub_folder in path_sub_folders_list:
path_sub_sub_sub = main_folder_path + sub_folder + '\\' + sub_sub_folder + '\\'
ssss_filelist = sorted(glob.glob(str(path_sub_sub_sub + '*.*')))
for filename_min in ssss_filelist:
filename_list.append(filename_min)
total_files = len(filename_list)
print(' number of files to compile:', str(total_files))
# get first file to get shape
convertion_output = Lidar_convert_txt_to_array(filename_list[0])
range_shape = convertion_output[1].shape[0]
# create arrays
time_array = np.zeros(total_files)
range_array = convertion_output[1][:,0]
ch0_pr2 = np.zeros((total_files, range_shape), dtype=float)
ch0_mrg = np.zeros((total_files, range_shape), dtype=float)
ch1_pr2 = np.zeros((total_files, range_shape), dtype=float)
ch1_mrg = np.zeros((total_files, range_shape), dtype=float)
ch2_pr2 = np.zeros((total_files, range_shape), dtype=float)
ch2_mrg = np.zeros((total_files, range_shape), dtype=float)
print('arrays initialized')
# populate arrays
for i_, filename_ in enumerate(filename_list):
p_progress(i_, total_files)
convertion_output = Lidar_convert_txt_to_array(filename_)
time_array[i_] = convertion_output[0]
ch0_pr2[i_, :] = convertion_output[1][:,1]
ch0_mrg[i_, :] = convertion_output[1][:,2]
ch1_pr2[i_, :] = convertion_output[1][:,3]
ch1_mrg[i_, :] = convertion_output[1][:,4]
ch2_pr2[i_, :] = convertion_output[1][:,5]
ch2_mrg[i_, :] = convertion_output[1][:,6]
# move to dict
output_dict = {}
output_dict['time'] = time_array
output_dict['range'] = range_array
output_dict['ch0_pr2'] = ch0_pr2
output_dict['ch0_mrg'] = ch0_mrg
output_dict['ch1_pr2'] = ch1_pr2
output_dict['ch1_mrg'] = ch1_mrg
output_dict['ch2_pr2'] = ch2_pr2
output_dict['ch2_mrg'] = ch2_mrg
return output_dict
def Lidar_convert_txt_to_array(filename_):
file_time_str = filename_[-25:-6]
time_stamp_seconds = time_str_to_seconds(file_time_str, '%Y-%m-%d_%H-%M-%S')
# read the data into an array
data_array_raw = genfromtxt(filename_,dtype=float, delimiter='\t',skip_header=133)
# only keep one altitude column
data_array_out = np.zeros((data_array_raw.shape[0], 7), dtype=float)
data_array_out[:,0] = data_array_raw[:,0]
data_array_out[:,1] = data_array_raw[:,1]
data_array_out[:,2] = data_array_raw[:,2]
data_array_out[:,3] = data_array_raw[:,4]
data_array_out[:,4] = data_array_raw[:,5]
data_array_out[:,5] = data_array_raw[:,7]
data_array_out[:,6] = data_array_raw[:,8]
return time_stamp_seconds, data_array_out
def compile_AWAP_precip_datafiles(file_list):
# load first file to get shape
print('loading file: ', file_list[0])
arr_1, start_date_sec_1 = load_AWAP_data(file_list[0])
rows_ = arr_1.shape[0]
columns_ = arr_1.shape[1]
# create lat and lon series
series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
series_lon = np.arange(112, 156.29, 0.05)
# create time array
output_array_time = np.zeros(len(file_list), dtype=float)
# create output array
output_array = np.zeros((len(file_list), rows_, columns_), dtype=float)
# load first array data into output array
output_array[0,:,:] = arr_1
output_array_time[0] = start_date_sec_1
    # loop through remaining files to populate output_array
for t_, filename_ in enumerate(file_list[1:]):
print('loading file: ', filename_)
arr_t, start_date_sec_t = load_AWAP_data(filename_)
output_array[t_+1, :, :] = arr_t
output_array_time[t_+1] = start_date_sec_t
return output_array, output_array_time, series_lat, series_lon
def load_AWAP_data(filename_):
start_date_str = filename_.split('\\')[-1][:8]
# stop_date_str = filename_.split('\\')[-1][8:16]
start_date_sec = time_str_to_seconds(start_date_str, '%Y%m%d')
arr_precip = np.genfromtxt(filename_, float, skip_header=6, skip_footer=18)
return arr_precip , start_date_sec
def get_means_from_filelist(file_list, lat_lon_ar):
# lat_lon_points_list = [ 147.8,
# 149,
# -36.8,
# -35.4]
# box domain indexes
index_c = [716, 740]
index_r = [508, 536]
series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
series_lon = np.arange(112,156.3,0.05)
lat_index_list, lon_index_list = find_index_from_lat_lon(series_lat, series_lon, lat_lon_ar[:,1], lat_lon_ar[:,0])
time_secs_list = []
precip_array = np.zeros((277,9),dtype=float)
for r_, filename_ in enumerate(file_list):
print('loading file: ', filename_)
arr_precip, start_date_sec = load_AWAP_data(filename_)
time_secs_list.append(start_date_sec)
precip_array[r_, 0] = start_date_sec
precip_array[r_, 1] = np.mean(arr_precip[index_r[0]:index_r[1]+1, index_c[0]:index_c[1]+1])
for i_ in range(2,9):
precip_array[r_, i_] = arr_precip[lat_index_list[i_-2],lon_index_list[i_-2]]
save_array_to_disk(['box mean precip [mm]','1 precip [mm]','2 precip [mm]','3 precip [mm]',
'4 precip [mm]','5 precip [mm]','6 precip [mm]','7 precip [mm]'],
precip_array[:,0], precip_array[:,1:], 'C:\\_output\\test_fimi_2.csv')
# save_HVF(['box','1','2','3','4','5','6','7'], precip_array, 'C:\\_output\\test_fimi_1.csv')
print("done")
return precip_array
def compile_BASTA_days_and_save_figure(directory_where_nc_file_are):
# compile BASTA data per day and save plot (per day)
time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
# directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
path_input = directory_where_nc_file_are
file_label = path_input.split('/')[-4] + '_' + path_input.split('/')[-3] + '_' + path_input.split('/')[-2] + '_'
file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
first_day_str = file_list_all[0][-18:-10]
last_day_str = file_list_all[-1][-18:-10]
first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
total_number_of_days = last_day_int - first_day_int
print('The data in the folder encompasses', total_number_of_days, 'days')
days_list_int = np.arange(first_day_int, last_day_int + 1)
days_list_str = time_seconds_to_str(time_days_to_seconds(days_list_int),'%Y%m%d')
for day_str in days_list_str:
print('-|' * 20)
file_list_day = sorted(glob.glob(str(path_input + file_label + day_str + '*.nc')))
print('Compiling day', day_str, len(file_list_day), 'files found for this day.')
if len(file_list_day) > 0:
filename_ = file_list_day[0]
print('loading file:', filename_)
netcdf_file_object = nc.Dataset(filename_, 'r')
# variable_names = sorted(netcdf_file_object.variables.keys())
time_raw = netcdf_file_object.variables['time'][:].copy()
file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
time_format_basta)
compiled_time_days = time_seconds_to_days(np.array(time_raw, dtype=int) + file_first_time_stamp)
compiled_raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
compiled_range_array = netcdf_file_object.variables['range'][:].copy()
netcdf_file_object.close()
if len(file_list_day) > 1:
for filename_ in file_list_day[1:]:
print('loading file:', filename_)
netcdf_file_object = nc.Dataset(filename_, 'r')
time_raw = netcdf_file_object.variables['time'][:].copy()
file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
time_format_basta)
time_days = time_seconds_to_days(np.array(time_raw, dtype = int) + file_first_time_stamp)
compiled_time_days = np.append(compiled_time_days, time_days)
raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
compiled_raw_reflectivity_array = np.vstack((compiled_raw_reflectivity_array,
raw_reflectivity_array))
netcdf_file_object.close()
figure_output_name = path_input + file_label + day_str + '.png'
print('saving figure to:', figure_output_name)
p_arr_vectorized_2(compiled_raw_reflectivity_array, compiled_time_days, compiled_range_array/1000,
cmap_=default_cm, figsize_=(12, 8), vmin_=80, vmax_=140,
cbar_label='Raw Reflectivity dB', x_header='UTC',y_header='Range AGL [km]',
figure_filename=figure_output_name,
time_format_ = '%H')
def compile_BASTA_into_one_file(directory_where_nc_file_are):
# compile BASTA data into one netcdf file
time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
# directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
path_input = directory_where_nc_file_are
file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
# first_day_str = file_list_all[0][-18:-10]
# last_day_str = file_list_all[-1][-18:-10]
# first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
# last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
# days_list_int = np.arange(first_day_int, last_day_int + 1)
# create copy of first file
netcdf_file_object = nc.Dataset(file_list_all[-1], 'r')
last_second_raw = netcdf_file_object.variables['time'][:][-1]
file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
time_format_basta)
netcdf_file_object.close()
last_second_epoc = last_second_raw + file_first_time_stamp
last_time_str = time_seconds_to_str(last_second_epoc, '%Y%m%d_%H%M%S')
output_filename = file_list_all[0][:-3] + '_' + last_time_str + '.nc'
shutil.copyfile(file_list_all[0], output_filename)
print('Created output file with name:', output_filename)
# open output file for appending data
netcdf_output_file_object = nc.Dataset(output_filename, 'a')
file_first_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_output_file_object.variables['time'].units,
time_format_basta)
variable_names = sorted(netcdf_output_file_object.variables.keys())
# create references to variables in output file
variable_objects_dict = {}
for var_name in variable_names:
variable_objects_dict[var_name] = netcdf_output_file_object.variables[var_name]
for filename_ in file_list_all[1:]:
print('-' * 5)
print('loading file:', filename_)
# open file
netcdf_file_object = nc.Dataset(filename_, 'r')
# create file's time series
file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units,
time_format_basta)
time_raw = netcdf_file_object.variables['time'][:].copy()
time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
row_start = variable_objects_dict['time'].shape[0]
row_end = time_raw.shape[0] + row_start
# append time array
variable_objects_dict['time'][row_start:row_end] = time_seconds_epoc - file_first_time_stamp_seconds_epoc
# append raw_reflectivity array
variable_objects_dict['raw_reflectivity'][row_start:row_end] = \
netcdf_file_object.variables['raw_reflectivity'][:].copy()
# append raw_velocity array
variable_objects_dict['raw_velocity'][row_start:row_end] = \
netcdf_file_object.variables['raw_velocity'][:].copy()
# append all other variables that only time dependent
for var_name in variable_names:
if var_name != 'time' and var_name != 'range' and \
var_name != 'raw_reflectivity' and var_name != 'raw_velocity':
if len(netcdf_file_object.variables[var_name].shape) == 1:
variable_objects_dict[var_name][row_start:row_end] = \
netcdf_file_object.variables[var_name][:].copy()
netcdf_file_object.close()
netcdf_output_file_object.close()
print('done')
def load_BASTA_data_from_netcdf_to_arrays(filename_):
# load BASTA data from netcdf to arrays
# path_input = '/home/luis/Data/BASTA/L0/'
# filename_ = path_input + 'BASTA_L0_12m5_20180606_071716_20180806_025422.nc'
time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
# open file
netcdf_file_object = nc.Dataset(filename_, 'r')
# load time as seconds and days
file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units, time_format_basta)
time_raw = netcdf_file_object.variables['time'][:].copy()
time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
time_days_epoc = time_seconds_to_days(time_seconds_epoc)
# append range array
array_range = netcdf_file_object.variables['range'][:].copy()
# append raw_reflectivity array
array_raw_reflectivity = netcdf_file_object.variables['raw_reflectivity']#[:].copy()
# append raw_velocity array
array_raw_velocity = netcdf_file_object.variables['raw_velocity']#[:].copy()
    # note: the file is left open on purpose; raw_reflectivity and raw_velocity are returned as
    # lazy netCDF variable objects, and closing the file here would invalidate them
    # netcdf_file_object.close()
return array_raw_reflectivity, array_raw_velocity, array_range, time_seconds_epoc, time_days_epoc
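# example usage (a minimal sketch; the path below is hypothetical and the returned
# reflectivity/velocity are lazy netCDF variable objects, so slice them to load data into memory):
# refl_obj, vel_obj, range_m, t_sec, t_days = load_BASTA_data_from_netcdf_to_arrays(
#     '/home/luis/Data/BASTA/L0/BASTA_L0_12m5_20180606_071716_20180806_025422.nc')
# refl_first_rows = refl_obj[:3600, :]  # loads only the requested slice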
def BASTA_load_period_to_dict(start_time_YMDHM, stop_time_YMDHM, folder_path,
variable_names=('time', 'range', 'raw_reflectivity', 'raw_velocity')):
time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
out_dict = {}
temp_dict = {}
variables_with_time_dimension = []
if not 'time' in variable_names:
variable_names_temp_list = ['time']
for variable_name in variable_names:
variable_names_temp_list.append(variable_name)
variable_names = variable_names_temp_list
# data_folder
data_folder = folder_path
# get all data files filenames
file_list = sorted(glob.glob(str(data_folder + '\\*.nc')))
file_times_tuple_list = []
file_times_tuple_list_str = []
for i_, filename_ in enumerate(file_list):
file_time_str_start = filename_.split('_')[-2] + filename_.split('_')[-1].split('.')[0]
file_time_sec_start = time_str_to_seconds(file_time_str_start, '%Y%m%d%H%M%S')
if i_ < len(file_list) -1:
file_time_str_stop = file_list[i_+1].split('_')[-2] + file_list[i_+1].split('_')[-1].split('.')[0]
file_time_sec_stop = time_str_to_seconds(file_time_str_stop, '%Y%m%d%H%M%S')
else:
file_time_sec_stop = file_time_sec_start + (24*60*60)
file_times_tuple_list.append(tuple((file_time_sec_start, file_time_sec_stop)))
file_times_tuple_list_str.append(tuple((file_time_str_start, time_seconds_to_str(file_time_sec_stop,
'%Y%m%d%H%M%S'))))
# select only files inside time range
event_start_sec = time_str_to_seconds(start_time_YMDHM, '%Y%m%d%H%M')
event_stop_sec = time_str_to_seconds(stop_time_YMDHM, '%Y%m%d%H%M')
selected_file_list = []
for file_index in range(len(file_list)):
if event_start_sec <= file_times_tuple_list[file_index][0] <= event_stop_sec:
selected_file_list.append(file_list[file_index])
elif event_start_sec <= file_times_tuple_list[file_index][1] <= event_stop_sec:
selected_file_list.append(file_list[file_index])
elif file_times_tuple_list[file_index][0] <= event_start_sec <= file_times_tuple_list[file_index][1]:
selected_file_list.append(file_list[file_index])
elif file_times_tuple_list[file_index][0] <= event_stop_sec <= file_times_tuple_list[file_index][1]:
selected_file_list.append(file_list[file_index])
print('found files:')
p_(selected_file_list)
# load data
if len(selected_file_list) == 0:
print('No files inside time range!')
return out_dict
else:
cnt = 0
for filename_ in selected_file_list:
if cnt == 0:
nc_file = nc.Dataset(filename_, 'r')
print('reading file:',filename_)
for variable_name in variable_names:
if 'time' in nc_file.variables[variable_name].dimensions:
variables_with_time_dimension.append(variable_name)
if variable_name == 'time':
file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
time_format_basta)
time_raw = nc_file.variables['time'][:].copy()
time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
temp_dict[variable_name] = time_seconds_epoc
else:
temp_dict[variable_name] = nc_file.variables[variable_name][:].filled(np.nan)
nc_file.close()
cnt += 1
else:
nc_file = nc.Dataset(filename_, 'r')
print('reading file:', filename_)
for variable_name in variable_names:
if 'time' in nc_file.variables[variable_name].dimensions:
variables_with_time_dimension.append(variable_name)
if len(nc_file.variables[variable_name].shape) == 1:
if variable_name == 'time':
file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
time_format_basta)
time_raw = nc_file.variables['time'][:].copy()
time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
temp_dict[variable_name] = np.hstack((temp_dict[variable_name], time_seconds_epoc))
else:
temp_dict[variable_name] = np.hstack((temp_dict[variable_name],
nc_file.variables[variable_name][:].filled(np.nan)))
else:
temp_dict[variable_name] = np.vstack((temp_dict[variable_name],
nc_file.variables[variable_name][:].filled(np.nan)))
nc_file.close()
# find row for start and end of event
start_row = np.argmin(np.abs(temp_dict['time'] - event_start_sec))
end_row = np.argmin(np.abs(temp_dict['time'] - event_stop_sec))
for variable_name in variable_names:
if variable_name in variables_with_time_dimension:
out_dict[variable_name] = temp_dict[variable_name][start_row:end_row]
else:
out_dict[variable_name] = temp_dict[variable_name]
return out_dict
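# example usage (a minimal sketch with hypothetical times and folder; loads the listed variables
# for a one-day window into a dictionary keyed by variable name):
# basta_dict = BASTA_load_period_to_dict('201806150000', '201806160000',
#                                        'C:\\_input\\BASTA\\L0\\12m5',
#                                        variable_names=('time', 'range', 'raw_reflectivity'))
# print(basta_dict['raw_reflectivity'].shape)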
def MRR_CFAD(range_array, Ze_array, bins_=(12, np.arange(-10, 40, 2)), normalize_height_wise = True, x_header='dBZe',
y_header='Height [km]', custom_y_range_tuple=None, custom_x_range_tuple=None, figure_filename=None,
cbar_label='', cmap_=default_cm, figsize_ = (10,6), title_str = '', contourF_=True, cbar_format='%.2f',
vmin_=None,vmax_=None, grid_=True, fig_ax=None, show_cbar=True, level_threshold_perc=10,
invert_y=False, levels=None,custom_ticks_x=None, custom_ticks_y=None, cbar_ax=None):
if len(range_array.shape) == 1:
temp_array = np.zeros((Ze_array.shape))
for r_ in range(Ze_array.shape[0]):
temp_array[r_,:] = range_array
range_array = temp_array
if type(bins_[0]) == int:
if bins_[0] < 1:
bins_ = (int(range_array.shape[1] * bins_[0]), bins_[1])
hist_out = np.histogram2d(range_array.flatten()[~np.isnan(Ze_array.flatten())] / 1000,
Ze_array.flatten()[~np.isnan(Ze_array.flatten())],
normed=False, bins=bins_)
hist_array, hist_r, hist_c = hist_out
hist_r = (hist_r[:-1] + hist_r[1:]) * 0.5
hist_c = (hist_c[:-1] + hist_c[1:]) * 0.5
hist_r_2d = np.zeros((hist_array.shape), dtype=float)
hist_c_2d = np.zeros((hist_array.shape), dtype=float)
for r_ in range(hist_array.shape[0]):
for c_ in range(hist_array.shape[1]):
hist_r_2d[r_, c_] = hist_r[r_]
hist_c_2d[r_, c_] = hist_c[c_]
# normalize height wise
if normalize_height_wise:
heights_counts = np.sum(hist_array, axis=1)
maximum_count_at_some_height = np.max(heights_counts)
cbar_label_final = 'Height normalized frequency'
for r_ in range(hist_array.shape[0]):
if heights_counts[r_] < maximum_count_at_some_height * (level_threshold_perc/100):
hist_array[r_, :] = np.nan
else:
hist_array[r_, :] = hist_array[r_, :] / heights_counts[r_]
else:
cbar_label_final = 'Normalized frequency'
if cbar_label == '': cbar_label = cbar_label_final
fig_ax = p_arr_vectorized_3(hist_array, hist_c_2d, hist_r_2d, contourF_=contourF_, grid_=grid_,
custom_y_range_tuple=custom_y_range_tuple, custom_x_range_tuple=custom_x_range_tuple,
x_header=x_header, y_header=y_header, cmap_=cmap_, figsize_=figsize_, cbar_ax=cbar_ax,
cbar_label=cbar_label, title_str=title_str, vmin_=vmin_, vmax_=vmax_,levels=levels,
figure_filename=figure_filename, fig_ax=fig_ax,show_cbar=show_cbar, invert_y=invert_y,
custom_ticks_x=custom_ticks_x, custom_ticks_y=custom_ticks_y,cbar_format=cbar_format)
    return fig_ax, hist_array.T, hist_c, hist_r  # hist_c and hist_r already hold bin centres; trimming them again would mismatch hist_array
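# example usage (a minimal sketch; Ze_array_ and range_array_ are hypothetical arrays of
# shape (time, range) in dBZe and (range,) in metres, respectively):
# fig_ax_, cfad_arr, cfad_x, cfad_y = MRR_CFAD(range_array_, Ze_array_,
#                                              bins_=(12, np.arange(-10, 40, 2)),
#                                              normalize_height_wise=True)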
# parsivel
def create_DSD_plot(DSD_arr, time_parsivel_seconds, size_arr, events_period_str, figfilename='',
output_data=False, x_range=(0, 7.5), y_range=(-1, 3.1), figsize_=(5, 5)):
size_series = size_arr[0, :]
event_row_start = time_to_row_str(time_parsivel_seconds, events_period_str.split('_')[0])
event_row_stop_ = time_to_row_str(time_parsivel_seconds, events_period_str.split('_')[1])
# normalize
DSD_arr_over_D = DSD_arr / size_arr
DSD_arr_over_D_by_D = np.sum(DSD_arr_over_D, axis=1)
DSD_arr_over_D_by_D_no_zero = DSD_arr_over_D_by_D * 1
DSD_arr_over_D_by_D_no_zero[DSD_arr_over_D_by_D_no_zero == 0] = np.nan
DSD_arr_over_D_by_D_log = np.log10(DSD_arr_over_D_by_D_no_zero)
DSD_arr_over_D_by_D_log_event_1_bin = np.array(DSD_arr_over_D_by_D_log[event_row_start:event_row_stop_])
DSD_arr_over_D_by_D_log_event_1_bin[~np.isnan(DSD_arr_over_D_by_D_log_event_1_bin)] = 1
DSD_arr_over_D_by_D_log_event_1_bin_sum = np.nansum(DSD_arr_over_D_by_D_log_event_1_bin, axis=0)
DSD_arr_over_D_by_D_log_event_1_meanbyD = np.nanmean(np.array(
DSD_arr_over_D_by_D_log[event_row_start:event_row_stop_]), axis=0)
DSD_arr_over_D_by_D_log_event_1_meanbyD[DSD_arr_over_D_by_D_log_event_1_bin_sum < 10] = np.nan
fig, ax = plt.subplots(figsize=figsize_)
ax.set_title('Mean value of drop concentrations in each diameter bin')
ax.set_xlabel('D [mm]')
ax.set_ylabel('log10 N(D) [m-3 mm-1]')
ax.plot(size_series, DSD_arr_over_D_by_D_log_event_1_meanbyD, '-or', label='Event 1')
ax.set_xlim(x_range)
ax.set_ylim(y_range)
ax.grid()
if figfilename != '':
fig.savefig(figfilename, transparent=True, bbox_inches='tight')
plt.close(fig)
if output_data:
return size_series, DSD_arr_over_D_by_D_log_event_1_meanbyD
def parsivel_nc_format_V2(input_filename, output_filename):
"""
    Converts the rough netCDF V1 file produced by save_parsivel_arrays_to_netcdf into the cleaner V2 format
:param input_filename: output from save_parsivel_arrays_to_netcdf
:param output_filename: a path and filename
:return:
"""
# create file
netcdf_output_file_object = nc.Dataset(output_filename, 'w')
print('created new file')
netcdf_first_file_object = nc.Dataset(input_filename)
# create attributes
    netcdf_output_file_object.setncattr('author', '<NAME> (<EMAIL>)')
netcdf_output_file_object.setncattr('version', 'V2')
netcdf_output_file_object.setncattr('created', time_seconds_to_str(time.time(), '%Y-%m-%d_%H:%M UTC'))
print('added attributes')
# create list for dimensions and variables
dimension_names_list = sorted(netcdf_first_file_object.dimensions)
variable_names_list = sorted(netcdf_first_file_object.variables)
# create dimensions
for dim_name in dimension_names_list:
if dim_name == 'time':
netcdf_output_file_object.createDimension('time', size=0)
print('time', 'dimension created')
else:
netcdf_output_file_object.createDimension(dim_name,
size=netcdf_first_file_object.dimensions[dim_name].size)
print(dim_name, 'dimension created')
# create variables
# time
var_name = 'time'
netcdf_output_file_object.createVariable(var_name, 'int64', (var_name,), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units',
'seconds since ' + time_seconds_to_str(0, time_format_mod))
time_parsivel_seconds = time_str_to_seconds(np.array(netcdf_first_file_object.variables[var_name][:], dtype=str),
time_format_parsivel)
netcdf_output_file_object.variables[var_name][:] = np.array(time_parsivel_seconds, dtype='int64')
print('created time variable')
# time_YmdHM
var_name = 'YYYYmmddHHMM'
netcdf_output_file_object.createVariable(var_name, 'str', ('time',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'YYYYmmddHHMM in string type')
netcdf_output_file_object.variables[var_name][:] = np.array(netcdf_first_file_object.variables['time'][:],
dtype=str)
print('created time_YmdHM variable')
# particle_fall_speed
var_name = 'particles_spectrum'
if var_name in variable_names_list:
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'particle counts per bin per minute')
netcdf_output_file_object.variables[var_name].setncattr('description',
'for each time stamp, the array varies with respect'
' to fall speed on the y axis (rows) starting from the top'
' and varies with respect to size on the x axis (columns) '
'starting from the left')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
print('created particles_spectrum variable')
# particle_fall_speed
var_name = 'particle_fall_speed'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
('particle_fall_speed',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'm/s')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:, 0].copy()
print('created particle_fall_speed variable')
# particle_size
var_name = 'particle_size'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
('particle_size',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'mm')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][0, :].copy()
print('created particle_size variable')
# precipitation_intensity
var_name = 'precipitation_intensity'
netcdf_output_file_object.createVariable(var_name,
'float',
netcdf_first_file_object.variables[
'Intensity of precipitation (mm|h)'].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'mm/h')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
print('created precipitation_intensity variable')
# Weather_code_SYNOP_WaWa
var_name = 'weather_code_SYNOP_WaWa'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dtype,
netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code SYNOP WaWa'][:].copy()
    print('created weather_code_SYNOP_WaWa variable')
    # Weather_code_METAR_SPECI
var_name = 'weather_code_METAR_SPECI'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code METAR|SPECI'].dtype,
netcdf_first_file_object.variables['Weather code METAR|SPECI'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code METAR|SPECI'][:].copy()
print('created weather_code_METAR_SPECI variable')
# Weather_code_NWS
var_name = 'weather_code_NWS'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code NWS'].dtype,
netcdf_first_file_object.variables['Weather code NWS'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
NWS_description = '''precip_type_dict = {
'C': 'No Precip',
'Kein Niederschlag': 'No Precip',
'A': 'Hail',
'L': 'Drizzle',
'L+': 'heavy Drizzle',
'L-': 'light Drizzle',
'R': 'Rain',
'R+': 'heavy Rain',
'R-': 'light Rain',
'RL': 'Drizzle and Rain',
'RL+': 'heavy Drizzle and Rain',
'RL-': 'light Drizzle and Rain',
'RLS': 'Rain, Drizzle and Snow',
'RLS+': 'heavy Rain, Drizzle and Snow',
'RLS-': 'light Rain, Drizzle and Snow',
'S': 'Snow',
'S+': 'heavy Snow',
'S-': 'light Snow',
'SG': 'Snow Grains',
'SP': 'Freezing Rain'
}'''
netcdf_output_file_object.variables[var_name].setncattr('description', NWS_description)
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code NWS'][:].copy()
print('created weather_code_NWS variable')
# Radar_reflectivity (dBz)
var_name = 'radar_reflectivity'
netcdf_output_file_object.createVariable(var_name,
'float',
netcdf_first_file_object.variables['Radar reflectivity (dBz)'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'dBz')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Radar reflectivity (dBz)'][:], dtype=float)
print('created radar_reflectivity variable')
# particle_count
var_name = 'particle_count'
netcdf_output_file_object.createVariable(var_name,
'int64',
netcdf_first_file_object.variables[
'Number of detected particles'].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'counts')
netcdf_output_file_object.variables[var_name].setncattr('description', 'Number of detected particles per minute')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Number of detected particles'][:], dtype='int64')
print('created particle_count variable')
# particle_concentration_spectrum
var_name = 'particle_concentration_spectrum'
var_name_old = 'particle_concentration_spectrum_m-3'
if var_name_old in variable_names_list:
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name_old].dtype,
netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'particles per meter cube per class')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
print('created particle_concentration_spectrum variable')
# N_total
var_name = 'N_total'
var_name_old = 'particle_concentration_total_m-3'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name_old].dtype,
netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'total particles per meter cube')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
print('created N_total variable')
# psd
var_name = 'psd'
var_name_old = 'particle_concentration_spectrum_m-3'
netcdf_output_file_object.createVariable(var_name,
'float',
('time', 'particle_size',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'particle size distribution, same as '
'particle_concentration_spectrum but all speeds'
'bins are summed, only varies with time and size')
netcdf_output_file_object.variables[var_name][:] = np.sum(netcdf_first_file_object.variables[var_name_old][:],
axis=1)
print('created psd variable')
# rain mask
rain_only_list = ['R', 'R+', 'R-']
RR_ = np.array(netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
NWS_ = netcdf_first_file_object.variables['Weather code NWS'][:].copy()
rain_mask = np.zeros(RR_.shape[0], dtype=int) + 1
for r_ in range(RR_.shape[0]):
if RR_[r_] > 0 and NWS_[r_] in rain_only_list:
rain_mask[r_] = 0
var_name = 'rain_mask'
netcdf_output_file_object.createVariable(var_name,
'int',
('time',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '0 if rain, 1 if not rain')
netcdf_output_file_object.variables[var_name].setncattr('description', 'using the NWS code, only used R, R+ and R-')
netcdf_output_file_object.variables[var_name][:] = rain_mask
    print('created rain_mask variable')
# close all files
netcdf_output_file_object.close()
netcdf_first_file_object.close()
def parsivel_sampling_volume(particle_size_2d, particle_fall_speed_2d):
    # effective laser-sheet area: the 180 mm x 30 mm beam reduced by half the particle
    # diameter, to account for particles falling partially outside the beam (margin fallers)
    sampling_area = 0.18 * (0.03 - ((particle_size_2d/1000) / 2))  # m2
sampling_time = 60 # seconds
sampling_height = particle_fall_speed_2d * sampling_time # meters
sampling_volume_2d = sampling_area * sampling_height # m3
return sampling_volume_2d
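# quick sanity check (a sketch, not part of the original module): a 1 mm drop falling at
# 4 m/s sees an effective area of 0.18 * (0.03 - 0.0005) = 0.00531 m2 and sweeps
# 4 * 60 = 240 m in one minute, i.e. a sampling volume of ~1.27 m3:
# print(parsivel_sampling_volume(np.array([[1.0]]), np.array([[4.0]])))  # ~[[1.2744]]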
def load_parsivel_txt_to_array(filename_, delimiter_=';'):
# filename_ = 'C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt'
size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
speed_array = np.zeros((32,32), dtype=float)
size_array = np.zeros((32, 32), dtype=float)
for i in range(32):
speed_array[:,i] = speed_scale
size_array[i, :] = size_scale
# read parsivel file
spectrum_array_list = []
data_array_list = []
with open(filename_) as file_object:
header_ = file_object.readline().split(delimiter_)
line_str = file_object.readline()
line_split = np.array(line_str.split(delimiter_))
if len(line_split) == 17:
line_split[16] = '0'
data_array_list.append(line_split[:-1])
spectrum_array_list.append(np.zeros((32,32)))
elif len(line_split) > 17:
line_split[16] = '0'
data_array_list.append(line_split[:16])
line_split[line_split == ''] = '0'
spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
elif len(line_split) == 16:
data_array_list.append(line_split[:-1])
spectrum_array_list.append(np.zeros((32,32)))
for line in file_object:
line_split = np.array(line.split(delimiter_))
if len(line_split) == 17:
line_split[16] = '0'
data_array_list.append(line_split[:-1])
spectrum_array_list.append(np.zeros((32, 32)))
elif len(line_split) > 17:
line_split[16] = '0'
data_array_list.append(line_split[:16])
line_split[line_split == ''] = '0'
spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
elif len(line_split) == 16:
if line_split[0] != 'Date':
data_array_list.append(line_split[:-1])
spectrum_array_list.append(np.zeros((32, 32)))
data_array = np.stack(data_array_list)
spectrum_array = np.stack(spectrum_array_list).astype(float)
t_list = []
for t_ in range(data_array.shape[0]):
t_list.append(data_array[t_][0] + ' ' + data_array[t_][1])
if len(header_) == 16:
# no spectra was set to record
return data_array, None, t_list, size_array, speed_array, header_
else:
return data_array, spectrum_array, t_list, size_array, speed_array, header_
def save_parsivel_arrays_to_netcdf(raw_spectra_filename, netcdf_output_filename,
                                   delimiter_=';', raw_time_format='%d.%m.%Y %H:%M:%S'):
# save_parsivel_arrays_to_netcdf('C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt', 'C:\\_input\\parsivel_compiled_3.nc')
print('reading txt to array')
data_array, spectrum_array, t_list, size_array, speed_array, header_ = \
load_parsivel_txt_to_array(raw_spectra_filename, delimiter_=delimiter_)
print('arrays created')
file_attributes_tuple_list = [('Compiled by', '<NAME> @: ' + str(datetime.datetime.now())),
('Data source', 'Parsivel Disdrometer'),
('time format', 'YYYYMMDDHHmm in uint64 data type, each ' +
                                   'time stamp is the accumulated precip for one minute')]
# time from str to int
time_array = np.zeros(data_array.shape[0], dtype='<U12')
# for t_ in range(data_array.shape[0]):
# time_array[t_] = int(t_list[t_][6:10] + # YYYY
# t_list[t_][3:5] + # MM
# t_list[t_][:2] + # DD
# t_list[t_][12:14] + # HH
# t_list[t_][15:17]) # mm
for t_ in range(data_array.shape[0]):
time_array[t_] = int(time_seconds_to_str(time_str_to_seconds(t_list[t_],raw_time_format),
time_format_parsivel))
pollutant_attributes_tuple_list = [('units', 'particles per minute')]
# create output file
    file_object_nc4 = nc.Dataset(netcdf_output_filename, 'w')  # ,format='NETCDF4_CLASSIC')
print('output file started')
# create dimensions
file_object_nc4.createDimension('particle_fall_speed', speed_array.shape[0])
file_object_nc4.createDimension('particle_size', size_array.shape[1])
file_object_nc4.createDimension('time', time_array.shape[0])
# create dimension variables
file_object_nc4.createVariable('particle_fall_speed', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
file_object_nc4.createVariable('particle_size', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
file_object_nc4.createVariable('time', 'u8', ('time',), zlib=True)
# populate dimension variables
file_object_nc4.variables['time'][:] = time_array[:]
file_object_nc4.variables['particle_fall_speed'][:] = speed_array[:]
file_object_nc4.variables['particle_size'][:] = size_array[:]
# create particles_spectrum array
if spectrum_array is not None:
file_object_nc4.createVariable('particles_spectrum', 'u2',
('time', 'particle_fall_speed', 'particle_size',), zlib=True)
# populate
file_object_nc4.variables['particles_spectrum'][:] = spectrum_array[:]
# create particle_concentration_spectrum_m-3
# get sampling volume
sampling_volume_2d = parsivel_sampling_volume(size_array, speed_array)
particle_concentration_spectrum = spectrum_array / sampling_volume_2d
# create variable
file_object_nc4.createVariable('particle_concentration_spectrum_m-3', 'float32',
('time', 'particle_fall_speed', 'particle_size',), zlib=True)
# populate
file_object_nc4.variables['particle_concentration_spectrum_m-3'][:] = particle_concentration_spectrum[:]
# create particle_concentration_total_m-3
particle_concentration_total = np.nansum(np.nansum(particle_concentration_spectrum, axis=-1), axis=-1)
# create variable
file_object_nc4.createVariable('particle_concentration_total_m-3', 'float32',
('time', ), zlib=True)
# populate
file_object_nc4.variables['particle_concentration_total_m-3'][:] = particle_concentration_total[:]
for attribute_ in pollutant_attributes_tuple_list:
setattr(file_object_nc4.variables['particles_spectrum'], attribute_[0], attribute_[1])
# create other data variables
for i_, head_ in enumerate(header_[:-1]):
var_name = head_.replace('/','|')
print('storing var name: ' , var_name)
temp_ref = file_object_nc4.createVariable(var_name, str, ('time',), zlib=True)
temp_ref[:] = data_array[:, i_]
for attribute_ in file_attributes_tuple_list:
setattr(file_object_nc4, attribute_[0], attribute_[1])
file_object_nc4.close()
print('Done!')
def load_parsivel_from_nc(netcdf_filename):
netcdf_file_object = nc.Dataset(netcdf_filename, 'r')
file_var_values_dict = {}
variable_name_list = netcdf_file_object.variables.keys()
for var_ in variable_name_list:
file_var_values_dict[var_] = netcdf_file_object.variables[var_][:].copy()
netcdf_file_object.close()
return file_var_values_dict, variable_name_list
def parsivel_plot_spectrum_counts(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
contourF=False, bins_=(0,2,5,10,20,50,100,200), fig_size=(5,5)):
cmap_parsivel = ListedColormap(['white', 'yellow', 'orange', 'lime', 'darkgreen',
'aqua', 'purple', 'navy', 'red'], 'indexed')
size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
speed_array = np.zeros((32,32), dtype=float)
size_array = np.zeros((32, 32), dtype=float)
for i in range(32):
speed_array[:,i] = speed_scale
size_array[i, :] = size_scale
spectrum_array_color = np.zeros((arr_.shape[0], arr_.shape[1]), dtype=float)
bin_labels = []
i_ = 0
for i_, bin_ in enumerate(bins_):
spectrum_array_color[arr_ > bin_] = i_ + 1
bin_labels.append(str(bin_))
bin_labels[i_] = '>' + bin_labels[i_]
fig, ax = plt.subplots(figsize=fig_size)
if contourF:
quad1 = ax.contourf(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
vmin=0, vmax=8)
else:
quad1 = ax.pcolormesh(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
vmin=0, vmax=8)
ax.set_ylim(y_range_tuple)
ax.set_xlim(x_range_tuple)
ax.set_xlabel('particle size [mm]')
ax.set_ylabel('particle speed [m/s]')
ax.set_title(title_)
cbar_label = 'Particles per bin'
cb2 = fig.colorbar(quad1)#, ticks=[0,1,2,3,4,5,6,7])
ticks_ = np.linspace(0.5, i_+0.5, len(bins_))
cb2.set_ticks(ticks_)
cb2.set_ticklabels(bin_labels)
cb2.ax.set_ylabel(cbar_label)
if save_filename is None:
plt.show()
else:
fig.savefig(save_filename, transparent=True, bbox_inches='tight')
plt.close(fig)
return fig, ax
def parsivel_plot_spectrum_DSD(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
contourF=False, fig_size=(5,5), cmap_=default_cm, cbar_label='DSD [m-3]',
nozeros_=True, vmin_=None, vmax_=None,):
size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
speed_array = np.zeros((32,32), dtype=float)
size_array = np.zeros((32, 32), dtype=float)
for i in range(32):
speed_array[:,i] = speed_scale
size_array[i, :] = size_scale
if nozeros_:
arr_ = np.array(arr_)
arr_[arr_ == 0] = np.nan
fig, ax = plt.subplots(figsize=fig_size)
if contourF:
quad1 = ax.contourf(size_array, speed_array, arr_, cmap=cmap_)
else:
quad1 = ax.pcolormesh(size_array, speed_array, arr_, cmap=cmap_, vmin=vmin_, vmax=vmax_)
ax.set_ylim(y_range_tuple)
ax.set_xlim(x_range_tuple)
ax.set_xlabel('particle size [mm]')
ax.set_ylabel('particle speed [m/s]')
ax.set_title(title_)
cb2 = fig.colorbar(quad1)
cb2.ax.set_ylabel(cbar_label)
if save_filename is None:
plt.show()
else:
fig.savefig(save_filename, transparent=True, bbox_inches='tight')
plt.close(fig)
return fig, ax
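# example usage (a minimal sketch; spectrum_2d_ is a hypothetical (32, 32) counts array, e.g. one
# time step of 'particles_spectrum', and size_array_/speed_array_ are the grids returned by
# load_parsivel_txt_to_array):
# parsivel_plot_spectrum_counts(spectrum_2d_, title_='2018-06-15 12:00')
# parsivel_plot_spectrum_DSD(spectrum_2d_ / parsivel_sampling_volume(size_array_, speed_array_),
#                            cbar_label='DSD [m-3]')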
def calculate_cumulative_precipitation_parsivel(parsivel_precipitation_mm_per_hour, parsivel_time_sec, time_period_str):
return np.nansum(
parsivel_precipitation_mm_per_hour[time_to_row_str(parsivel_time_sec, time_period_str.split('_')[0]):
time_to_row_str(parsivel_time_sec, time_period_str.split('_')[1])]) / 60
def calculate_D_m(N_D, D_series):
    # mass-weighted mean diameter: ratio of the 4th to the 3rd moment of the drop size distribution
    D_grad = np.gradient(D_series)
D_m = np.nansum((N_D * (D_series**4) * D_grad)) / np.nansum((N_D * (D_series ** 3) * D_grad))
return D_m
def calculate_LWC(N_D, D_series):
    # liquid water content from the 3rd moment of the drop size distribution
    D_grad = np.gradient(D_series)
water_density = 1E6 # g/m3
LWC_ = (np.pi * water_density / 6) * np.nansum((N_D * (D_series**3) * D_grad))
return LWC_
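# worked example (a sketch, not part of the original module): for an exponential
# (Marshall-Palmer-like) DSD the moment ratio above can be checked against the
# analytic value D_m = 4 / Lambda
# D_series_ = np.arange(0.1, 8, 0.1)      # mm
# N_D_ = 8000 * np.exp(-2.0 * D_series_)  # m-3 mm-1, Lambda = 2 mm-1
# print(calculate_D_m(N_D_, D_series_))   # ~2.0 mm, close to 4 / Lambda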
# Holographic microscope
def convert_raw_to_array(filename_):
print('converting file: ' + filename_.split('/')[-1])
A = np.fromfile(filename_, dtype='uint8')
    evenEl = A[1::2]  # high bytes
    oddEl = A[0::2]   # low bytes
    B = 256 * evenEl.astype(np.uint16) + oddEl  # little-endian 16-bit values; cast avoids uint8 overflow
width = 2592
height = 1944
I = B.reshape(height, width)
return I
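# note: an equivalent one-liner (a sketch, assuming the raw files are plain little-endian
# 16-bit frames with no header):
# I = np.fromfile(filename_, dtype='<u2').reshape(1944, 2592)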
def create_video_from_filelist(file_list, output_filename, cmap_):
width = 2592
height = 1944
array_3d = np.zeros((len(file_list), height, width), dtype='uint8')
time_list = []
for t_, filename_ in enumerate(file_list):
array_3d[t_,:,:] = convert_raw_to_array(filename_)
time_list.append(filename_[-21:-4])
create_video_animation_from_3D_array(array_3d, output_filename, colormap_= cmap_, title_list=time_list,
axes_off=True, show_colorbar=False, interval_=500)
def convert_array_to_png_array(array_):
# shape
rows_ = array_.shape[0]
columns_ = array_.shape[1]
# nan layer
array_nan = np.zeros((rows_, columns_), dtype='uint8')
array_nan[array_ != array_] = 100
# replace nans
array_[array_ != array_] = 0
# convert to all positive
array_positive = np.abs(array_)
# sign layer
array_sign = np.zeros((rows_, columns_), dtype='uint8')
array_sign[array_ >= 0] = 100
# zeros array
array_zeros = np.zeros((rows_, columns_), dtype='uint8')
array_zeros[array_positive != 0] = 1
# sub 1 array
array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
array_sub1[array_positive<1] = 1
array_sub1 = array_sub1 * array_zeros
# power array
exp_ = np.array(np.log10(array_positive), dtype=int)
exp_[array_zeros==0] = 0
# integral array
array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
# array_layer_1
array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
# array_layer_2
array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
- np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
dtype='uint8')
array_layer_2 = array_layer_2 + array_nan
# power sign layer
exp_ = exp_ - array_sub1
array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
array_power_sign[exp_ >= 0] = 100
# array_layer_3
array_layer_3 = np.abs(exp_) + array_power_sign
# initialize out array
out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
# dump into out array
out_array[:, :, 0] = array_layer_1
out_array[:, :, 1] = array_layer_2
out_array[:, :, 2] = array_layer_3
return out_array
# netcdf file handling
def netCDF_crop_timewise(input_filename, time_stamp_start_str_YYYYmmDDHHMM, time_stamp_stop_str_YYYYmmDDHHMM,
output_filename=None, vars_to_keep=None, time_dimension_name='time'):
"""
Creates a copy of an input netCDF4 file with only a subset of the data
:param input_filename: netCDF4 file with path
    :param time_stamp_start_str_YYYYmmDDHHMM: String in YYYYmmDDHHMM format
    :param time_stamp_stop_str_YYYYmmDDHHMM: String in YYYYmmDDHHMM format
:param output_filename: filename with path and .nc extension. If none, output file will be in same folder as input
:param vars_to_keep: list of variable names in str to be kept in output copy. If none, all variables will be copied
:param time_dimension_name: name of time dimension
:return: 0 if good, filename if error
"""
error_file = 0
try:
nc_input_file = nc.Dataset(input_filename)
time_array = nc_input_file.variables[time_dimension_name][:].copy()
nc_input_file.close()
r_1 = time_to_row_str(time_array, time_stamp_start_str_YYYYmmDDHHMM)
r_2 = time_to_row_str(time_array, time_stamp_stop_str_YYYYmmDDHHMM)
dict_ = load_netcdf_to_dictionary(input_filename, var_list=vars_to_keep,
time_tuple_start_stop_row=(r_1,r_2), time_dimension_name=time_dimension_name)
if output_filename is None:
output_filename = input_filename[:-3] + '_trimmed_' + str(r_1) + '_' + str(r_2) + '.nc'
save_dictionary_to_netcdf(dict_, output_filename)
except BaseException as error_msg:
print(error_msg)
error_file = input_filename
return error_file
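# example usage (a minimal sketch with hypothetical filename and times):
# err_ = netCDF_crop_timewise('C:\\_input\\BASTA_L0_12m5_merged.nc',
#                             '201806150000', '201806160000',
#                             vars_to_keep=['time', 'range', 'raw_reflectivity'])
# if err_ != 0: print('failed on', err_)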
def add_variable_to_netcdf_file(nc_filename, variables_dict):
"""
Opens and adds a variable(s) to the file. Will not add new dimensions.
:param nc_filename: str including path
:param variables_dict:
        must be a dictionary with variable names as keys; each entry is itself a dictionary with the
        following keys:
        data: a numpy array (can be masked)
        dimensions: a tuple, in the same order as the array's dimensions
        attributes: a list of tuples with attribute name and description text
:return: None
"""
# check if dict_ has the right format
# create dimension and variables lists
vars_list = variables_dict.keys()
for var_ in vars_list:
if 'dimensions' in variables_dict[var_].keys():
pass
else:
            print('dictionary has the wrong format, ' + var_ + ' variable is missing its dimensions')
return
if 'attributes' in variables_dict[var_].keys():
pass
else:
            print('dictionary has the wrong format, ' + var_ + ' variable is missing its attributes')
return
# open file
file_obj = nc.Dataset(nc_filename,'a')
    print('file opened, do not close this thread or file might be corrupted')
try:
# check that variable shapes agree with destination file
for var_ in vars_list:
dim_list = list(variables_dict[var_]['dimensions'])
var_shape = variables_dict[var_]['data'].shape
for i_, dim_ in enumerate(dim_list):
if dim_ in sorted(file_obj.dimensions):
if var_shape[i_] == file_obj.dimensions[dim_].size:
pass
else:
print('Variable', var_, 'has dimension', dim_,
'of different size compared to destination file\nfile closed')
file_obj.close()
return
else:
print('Variable', var_, 'has dimension', dim_,
'which does not exist in destination file\nfile closed')
file_obj.close()
return
# create variables
print('creating', var_, 'variable')
file_obj.createVariable(var_,
variables_dict[var_]['data'].dtype,
variables_dict[var_]['dimensions'], zlib=True)
# populate variables
file_obj.variables[var_][:] = variables_dict[var_]['data']
for var_attr in variables_dict[var_]['attributes']:
if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
pass
else:
setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
print('created', var_, 'variable')
except BaseException as error_msg:
file_obj.close()
print('error, file closed\n', error_msg)
print('All good, closing file')
file_obj.close()
print('Done!')
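# example of the variables_dict format expected above (a minimal sketch; 'rain_mask_v2' and
# its dimension size are hypothetical and must match the destination file):
# variables_dict_ = {'rain_mask_v2': {'data': np.zeros(1440, dtype=int),
#                                     'dimensions': ('time',),
#                                     'attributes': [('units', '0 if rain, 1 if not rain')]}}
# add_variable_to_netcdf_file('C:\\_input\\parsivel_compiled_3.nc', variables_dict_)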
def save_dictionary_to_netcdf(dict_, output_filename):
"""
Saves a dictionary with the right format to a netcdf file. First dim will be set to unlimited.
    :param dict_: must have a dimensions key, a variables key, and an attributes key.
                  dimensions key should have a list of the names of the dimensions
                  variables key should have a dictionary inside with variable names as keys
                  attributes key should have a list of tuples inside, with the name of the attribute and description in each tuple
                  Each var must have a data key equal to a numpy array (can be masked)
                  Each var must have a dimensions key equal to a tuple, in the same order as the array's dimensions
                  all attributes are tuples with name and description text
:param output_filename: should include full path and extension
:return: None
"""
# check if dict_ has the right format
if 'variables' in dict_.keys():
pass
else:
print('dictionary has the wrong format, missing variables key')
return
if 'dimensions' in dict_.keys():
pass
else:
print('dictionary has the wrong format, missing dimensions key')
return
if 'attributes' in dict_.keys():
pass
else:
print('dictionary has the wrong format, missing attributes key')
return
# create dimension and variables lists
vars_list = dict_['variables'].keys()
dims_list = dict_['dimensions']
for dim_ in dims_list:
if dim_ in vars_list:
pass
else:
            print('dictionary has the wrong format, ' + dim_ + ' dimension is missing from variables')
            return
for var_ in vars_list:
if 'dimensions' in dict_['variables'][var_].keys():
pass
else:
            print('dictionary has the wrong format, ' + var_ + ' variable is missing its dimensions')
return
if 'attributes' in dict_['variables'][var_].keys():
pass
else:
            print('dictionary has the wrong format, ' + var_ + ' variable is missing its attributes')
return
# create output file
file_obj = nc.Dataset(output_filename,'w')#,format='NETCDF4_CLASSIC')
print('output file started')
# populate file's attributes
for attribute_ in dict_['attributes']:
setattr(file_obj, attribute_[0], attribute_[1])
# create dimensions
for i_, dim_ in enumerate(dims_list):
if i_ == 0:
file_obj.createDimension(dim_, size=0)
else:
shape_index = np.argwhere(np.array(dict_['variables'][dim_]['dimensions']) == dim_)[0][0]
file_obj.createDimension(dim_, dict_['variables'][dim_]['data'].shape[shape_index])
print('dimensions created')
# create variables
for var_ in vars_list:
print('creating', var_, 'variable')
file_obj.createVariable(var_,
dict_['variables'][var_]['data'].dtype,
dict_['variables'][var_]['dimensions'], zlib=True)
# populate variables
file_obj.variables[var_][:] = dict_['variables'][var_]['data']
for var_attr in dict_['variables'][var_]['attributes']:
            if isinstance(var_attr, str):
                # 'attributes' was given as a single (name, description) tuple instead of a list of tuples
setattr(file_obj.variables[var_], dict_['variables'][var_]['attributes'][0],
dict_['variables'][var_]['attributes'][1])
break
else:
if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
pass
else:
setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
print('created', var_, 'variable')
print('storing data to disk and closing file')
file_obj.close()
print('Done!')
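# example of the dict_ format expected above (a minimal sketch with made-up data; the first
# dimension listed becomes the unlimited one):
# dict_ = {'attributes': [('author', 'me')],
#          'dimensions': ['time'],
#          'variables': {'time': {'data': np.arange(10),
#                                 'dimensions': ('time',),
#                                 'attributes': [('units', 'seconds since 1970-01-01_00:00:00')]},
#                        'temperature': {'data': np.ones(10) * 20.5,
#                                        'dimensions': ('time',),
#                                        'attributes': [('units', 'C')]}}}
# save_dictionary_to_netcdf(dict_, 'C:\\_output\\example.nc')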
def load_netcdf_to_dictionary(filename_, var_list=None, time_tuple_start_stop_row=None, time_dimension_name='time'):
"""
creates a dictionary from a netcdf file, with the following format
:param filename_: filename with path of a netCDF4 file
:param var_list: list of variables to be loaded, if none, all variables will be loaded
    :param time_tuple_start_stop_row: tuple with two row indices; the time dimension will be trimmed rows[0]:rows[1]
:param time_dimension_name: name of time dimension
:return: dict_: have a dimensions key, a variables key, and a attributes key.
             Each var has a data key equal to a numpy array (can be masked) and an attributes key
             Each var has a dimensions key equal to a tuple, in the same order as the array's dimensions
             all attributes are tuples with name and description text
"""
# create output dict
out_dict = {}
# open file
file_obj = nc.Dataset(filename_, 'r') # ,format='NETCDF4_CLASSIC')
    print('reading file:', filename_)
# get file's attr
file_att_list_tuple = []
for attr_ in file_obj.ncattrs():
file_att_list_tuple.append((attr_, file_obj.getncattr(attr_)))
out_dict['attributes'] = file_att_list_tuple
# get dimensions
out_dict['dimensions'] = sorted(file_obj.dimensions)
# get variables
if var_list is None:
var_list = sorted(file_obj.variables)
out_dict['variables'] = {}
# create variables
for var_ in var_list:
out_dict['variables'][var_] = {}
if time_tuple_start_stop_row is not None:
if time_dimension_name in file_obj.variables[var_].dimensions:
out_dict['variables'][var_]['data'] = file_obj.variables[var_][time_tuple_start_stop_row[0]:
time_tuple_start_stop_row[1]]
else:
out_dict['variables'][var_]['data'] = file_obj.variables[var_][:]
else:
out_dict['variables'][var_]['data'] = file_obj.variables[var_][:]
var_att_list_tuple = []
for attr_ in file_obj.variables[var_].ncattrs():
var_att_list_tuple.append((attr_, file_obj.variables[var_].getncattr(attr_)))
out_dict['variables'][var_]['attributes'] = var_att_list_tuple
out_dict['variables'][var_]['dimensions'] = file_obj.variables[var_].dimensions
print('read variable', var_)
file_obj.close()
print('Done!')
return out_dict
def merge_multiple_netCDF_by_time_dimension(directory_where_nc_file_are_in_chronological_order, output_path='',
output_filename=None, time_variable_name='time', time_dimension_name=None,
vars_to_keep=None, nonTimeVars_check_list=None,
key_search_str='', seek_in_subfolders=False, force_file_list=None):
if force_file_list is not None:
file_list_all = sorted(force_file_list)
else:
if seek_in_subfolders:
if key_search_str == '':
file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order))
else:
file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order,
filter_str=key_search_str))
else:
file_list_all = sorted(glob.glob(str(directory_where_nc_file_are_in_chronological_order
+ '*' + key_search_str + '*.nc')))
print('Files to be merged (in this order):')
parameter_list = ''
for i, parameter_ in enumerate(file_list_all):
parameter_list = str(parameter_list) + str(i) + " ---> " + str(parameter_) + '\n'
print(parameter_list)
# create copy of first file
if output_filename is None:
if output_path == '':
output_filename = file_list_all[0][:-3] + '_merged.nc'
else:
output_filename = output_path + file_list_all[0].split('\\')[-1][:-3] + '_merged.nc'
# define time variable and dimension
if time_dimension_name is None:
time_dimension_name = time_variable_name
# check if time dimension is unlimited
netcdf_first_file_object = nc.Dataset(file_list_all[0], 'r')
if netcdf_first_file_object.dimensions[time_dimension_name].size == 0 and vars_to_keep is None:
# all good, just make copy of file with output_filename name
netcdf_first_file_object.close()
shutil.copyfile(file_list_all[0], output_filename)
print('first file in merger list has unlimited time dimension, copy created with name:', output_filename)
else:
# not so good, create new file and copy everything from first, make time dimension unlimited...
netcdf_output_file_object = nc.Dataset(output_filename, 'w')
print('first file in merger list does not have unlimited time dimension, new file created with name:',
output_filename)
# copy main attributes
attr_list = netcdf_first_file_object.ncattrs()
for attr_ in attr_list:
netcdf_output_file_object.setncattr(attr_, netcdf_first_file_object.getncattr(attr_))
print('main attributes copied')
# create list for dimensions and variables
dimension_names_list = sorted(netcdf_first_file_object.dimensions)
if vars_to_keep is None:
variable_names_list = sorted(netcdf_first_file_object.variables)
else:
variable_names_list = vars_to_keep
# create dimensions
for dim_name in dimension_names_list:
if dim_name == time_dimension_name:
netcdf_output_file_object.createDimension(time_dimension_name, size=0)
print(time_variable_name, 'dimension created')
else:
netcdf_output_file_object.createDimension(dim_name,
size=netcdf_first_file_object.dimensions[dim_name].size)
print(dim_name, 'dimension created')
# create variables
for var_name in variable_names_list:
# create
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
print(var_name, 'variable created')
# copy the attributes
attr_list = netcdf_first_file_object.variables[var_name].ncattrs()
for attr_ in attr_list:
netcdf_output_file_object.variables[var_name].setncattr(attr_,
netcdf_first_file_object.variables[
var_name].getncattr(attr_))
print('variable attributes copied')
# copy the data to the new file
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
print('variable data copied')
print('-=' * 20)
# close all files
netcdf_output_file_object.close()
netcdf_first_file_object.close()
print('starting to copy other files into merged file')
vars_list = variable_names_list
for filename_ in file_list_all[1:]:
# open output file for appending data
netcdf_output_file_object = nc.Dataset(output_filename, 'a')
print('-' * 5)
print('loading file:', filename_)
# open hourly file
netcdf_file_object = nc.Dataset(filename_, 'r')
# get time array
time_hourly = np.array(netcdf_file_object.variables[time_variable_name][:], dtype=float)
row_start = netcdf_output_file_object.variables[time_variable_name].shape[0]
row_end = time_hourly.shape[0] + row_start
# append time array
netcdf_output_file_object.variables[time_variable_name][row_start:row_end] = time_hourly
# append all other variables that only time dependent
for var_name in vars_list:
if var_name != time_variable_name:
if time_dimension_name in netcdf_output_file_object.variables[var_name].dimensions:
netcdf_output_file_object.variables[var_name][row_start:row_end] = \
netcdf_file_object.variables[var_name][:].copy()
# check non time dependent variables for consistency
vars_list_sub = sorted(netcdf_file_object.variables)
if vars_list_sub != sorted(netcdf_first_file_object.variables):
print('Alert! Variables in first file are different than other files')
print('first file variables:')
p_(sorted(netcdf_first_file_object.variables))
print(filename_, 'file variables:')
p_(vars_list_sub)
if nonTimeVars_check_list is not None:
for var_name in nonTimeVars_check_list:
if np.nansum(np.abs(netcdf_file_object.variables[var_name][:].copy() -
netcdf_output_file_object.variables[var_name][:].copy())) != 0:
print('Alert!', var_name, 'from file:', filename_, 'does not match the first file')
# copy the attributes
netcdf_output_file_object.variables[var_name].setncattr(
'values from file ' + filename_, netcdf_file_object.variables[var_name][:].copy()
)
netcdf_file_object.close()
netcdf_output_file_object.close()
print('done')
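# example usage (a minimal sketch; the folder is hypothetical and the files inside must be
# named so that alphabetical order equals chronological order):
# merge_multiple_netCDF_by_time_dimension('C:\\_input\\BASTA\\hourly\\',
#                                         output_filename='C:\\_output\\BASTA_merged.nc',
#                                         time_variable_name='time',
#                                         nonTimeVars_check_list=['range'])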
def load_netcdf_file_variable(filename_, variable_name_list=None):
netcdf_file_object = nc.Dataset(filename_, 'r')
file_attributes_dict = {}
file_var_values_dict = {}
file_var_attrib_dict = {}
file_dim_dict = {}
if variable_name_list is None: variable_name_list = list(netcdf_file_object.variables)
    # use the public netCDF4 API (ncattrs/getncattr); the private ._attributes does not exist in netCDF4
    for atr_ in netcdf_file_object.ncattrs():
        file_attributes_dict[atr_] = netcdf_file_object.getncattr(atr_)
    for dim_ in netcdf_file_object.dimensions:
        file_dim_dict[dim_] = netcdf_file_object.dimensions[dim_].size
    for var_ in variable_name_list:
        file_var_values_dict[var_] = netcdf_file_object.variables[var_][:].copy()
        file_var_attrib_dict[var_] = {}
        for atr_ in netcdf_file_object.variables[var_].ncattrs():
            file_var_attrib_dict[var_][atr_] = netcdf_file_object.variables[var_].getncattr(atr_)
netcdf_file_object.close()
return file_attributes_dict, file_var_values_dict, file_var_attrib_dict, file_dim_dict
def save_array_list_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
file_object = nc.Dataset(out_filename, 'w')
# file_object.history = 'Created for a test'
for variable_ in range(len(array_list)):
dim_list_name = []
for dim_ in range(len(array_list[variable_].shape)):
dim_name = str(variable_) + '_' + str(dim_)
dim_list_name.append(dim_name)
file_object.createDimension(dim_name, array_list[variable_].shape[dim_])
dtype_ = str(array_list[variable_].dtype)[0]
file_object.createVariable( name_list[variable_], dtype_, tuple(dim_list_name) )
setattr(file_object.variables[name_list[variable_]], 'units',units_list[variable_])
        # write the data; assigning to the dict entry (without [:]) would not store anything in the file
        file_object.variables[name_list[variable_]][:] = array_list[variable_][:]
for atri_ in attributes_list:
setattr(file_object, atri_[0], atri_[1])
file_object.close()
def save_time_series_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
file_object = nc.Dataset(out_filename, 'w')
# create time dimension
file_object.createDimension('time', array_list[0].shape[0])
for variable_ in range(len(array_list)):
dtype_ = str(array_list[variable_].dtype)[0]
if dtype_ == '<': dtype_ = 'S1'
file_object.createVariable(name_list[variable_], dtype_, ('time',))
setattr(file_object.variables[name_list[variable_]], 'units',units_list[variable_])
file_object.variables[name_list[variable_]][:] = array_list[variable_][:]
# temp_variable_handle[:] = array_list[variable_][:]
for atri_ in attributes_list:
setattr(file_object, atri_[0], atri_[1])
file_object.close()
def save_emissions_to_new_netcdf(out_filename, emissions_array, pollutant_name, time_array, lat_array, lon_array,
file_attributes_tuple_list, pollutant_attributes_tuple_list):
file_object = nc.Dataset(out_filename, 'w')
# create dimensions
file_object.createDimension('lat', lat_array.shape[0])
file_object.createDimension('lon', lon_array.shape[0])
file_object.createDimension('time', time_array.shape[0])
# create dimension variables
file_object.createVariable('time', str(time_array.dtype)[0], ('time', ))
file_object.createVariable('lat', str(lat_array.dtype)[0], ('lat',))
file_object.createVariable('lon', str(lon_array.dtype)[0], ('lon',))
# populate dimension variables
file_object.variables['time'][:] = time_array[:]
file_object.variables['lat'][:] = lat_array[:]
file_object.variables['lon'][:] = lon_array[:]
# create emission array
file_object.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
# populate
file_object.variables[pollutant_name][:] = emissions_array[:]
for attribute_ in file_attributes_tuple_list:
setattr(file_object, attribute_[0], attribute_[1])
for attribute_ in pollutant_attributes_tuple_list:
setattr(file_object.variables[pollutant_name], attribute_[0], attribute_[1])
file_object.close()
def save_emissions_to_existing_netcdf(out_filename, emissions_array, pollutant_name, attributes_tuple_list):
file_object = nc.Dataset(out_filename, 'a')
file_object.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
file_object.variables[pollutant_name][:] = emissions_array[:]
setattr(file_object.variables[pollutant_name], 'pollutant name', pollutant_name)
for attribute_ in attributes_tuple_list:
setattr(file_object.variables[pollutant_name], attribute_[0], attribute_[1])
file_object.close()
def WRF_emission_file_modify(filename_, variable_name, cell_index_west_east, cell_index_south_north, new_value):
netcdf_file_object = nc.Dataset(filename_, 'a')
current_array = netcdf_file_object.variables[variable_name][0,0,:,:].copy()
current_value = current_array[cell_index_south_north, cell_index_west_east]
print(current_value)
current_array[cell_index_south_north, cell_index_west_east] = new_value
netcdf_file_object.variables[variable_name][0,0,:,:] = current_array[:,:]
netcdf_file_object.close()
def find_wrf_3d_cell_from_latlon_to_south_north_west_east(lat_, lon_, wrf_output_filename,
wrf_lat_variablename='XLAT', wrf_lon_variablename='XLONG',
flatten_=False):
netcdf_file_object_wrf = nc.Dataset(wrf_output_filename, 'r')
wrf_lat_array = netcdf_file_object_wrf.variables[wrf_lat_variablename][:,:].copy()
wrf_lon_array = netcdf_file_object_wrf.variables[wrf_lon_variablename][:,:].copy()
netcdf_file_object_wrf.close()
wrf_abs_distance = ( (np.abs(wrf_lat_array - lat_)**2) + (np.abs(wrf_lon_array - lon_)**2) )**0.5
if flatten_:
return np.argmin(wrf_abs_distance)
else:
return np.unravel_index(np.argmin(wrf_abs_distance), wrf_abs_distance.shape)
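# example usage (a minimal sketch; the file and coordinates are hypothetical, and XLAT/XLONG
# are assumed to be 2-D in the wrfout file):
# sn_we_ = find_wrf_3d_cell_from_latlon_to_south_north_west_east(
#     -32.9, 151.7, 'C:\\_input\\wrfout_d03_2018-06-15')
# print('closest cell (south_north, west_east):', sn_we_)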
# specialized tools
def vectorize_array(array_):
    # flattens a 2D array into rows of (row_index, column_index, value)
    output_array = np.zeros((array_.shape[0] * array_.shape[1], 3), dtype=float)
    for r_ in range(array_.shape[0]):
        for c_ in range(array_.shape[1]):
            output_array[r_ * array_.shape[1] + c_, 0] = r_
            output_array[r_ * array_.shape[1] + c_, 1] = c_
            output_array[r_ * array_.shape[1] + c_, 2] = array_[r_, c_]
    return output_array
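# an equivalent vectorized version (a sketch added for reference; same output, no Python loops)
def vectorize_array_fast(array_):
    r_idx, c_idx = np.meshgrid(np.arange(array_.shape[0]), np.arange(array_.shape[1]), indexing='ij')
    return np.column_stack((r_idx.flatten(), c_idx.flatten(), array_.flatten())).astype(float)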
def exceedance_rolling(arr_time_seconds, arr_values, standard_, rolling_period, return_rolling_arrays=False):
## assumes data is in minutes and in same units as standard
time_secs_1h, values_mean_disc_1h = mean_discrete(arr_time_seconds, arr_values, 3600, arr_time_seconds[0], min_data=45)
values_rolling_mean = row_average_rolling(values_mean_disc_1h, rolling_period)
counter_array = np.zeros(values_rolling_mean.shape[0])
counter_array[values_rolling_mean > standard_] = 1
total_number_of_exceedances = np.sum(counter_array)
#create date str array
T_ = np.zeros((time_secs_1h.shape[0],5),dtype='<U32')
for r_ in range(time_secs_1h.shape[0]):
if time_secs_1h[r_] == time_secs_1h[r_]:
T_[r_] = time.strftime("%Y_%m_%d",time.gmtime(time_secs_1h[r_])).split(',')
exceedance_date_list = []
for r_, rolling_stamp in enumerate(values_rolling_mean):
if rolling_stamp > standard_:
exceedance_date_list.append(T_[r_])
exc_dates_array = np.array(exceedance_date_list)
exc_dates_array_unique = np.unique(exc_dates_array)
if return_rolling_arrays:
return total_number_of_exceedances, exc_dates_array_unique, time_secs_1h, values_rolling_mean
else:
return total_number_of_exceedances, exc_dates_array_unique
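# example usage (a minimal sketch; the 8-hour rolling window and the standard value are
# hypothetical and must be in the same units as arr_values):
# n_exc, exc_dates = exceedance_rolling(time_sec_, o3_ppb_, standard_=60, rolling_period=8)
# print(n_exc, 'exceedances on days:', exc_dates)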
# ozonesonde and radiosonde related
def load_sonde_data(filename_, mode_='PBL'): ##Loads data and finds inversions, creates I_
# global V_, M_, H_, ASL_, time_header, I_, I_line
# global ASL_avr, L_T, L_RH, time_string, time_days, time_seconds, year_, flight_name
## user defined variables
delimiter_ = ','
error_flag = -999999
first_data_header = 'Day_[GMT]'
day_column_number = 0
month_column_number = 1
year_column_number = 2
hour_column_number = 3
minute_column_number = 4
second_column_number = 5
# time_header = 'Local Time' # defining time header
# main data array
sample_data = filename_
# look for data start (header size)
with open(sample_data) as file_read:
header_size = -1
r_ = 0
for line_string in file_read:
if (len(line_string) >= len(first_data_header) and
line_string[:len(first_data_header)] == first_data_header):
header_size = r_
break
r_ += 1
if header_size == -1:
print('no data found!')
sys.exit()
data_array = np.array(genfromtxt(sample_data,
delimiter=delimiter_,
skip_header=header_size,
dtype='<U32'))
# defining header and data arrays
M_ = data_array[1:, 6:].astype(float)
H_ = data_array[0, 6:]
ASL_ = M_[:, -1]
# year_ = data_array[1, year_column_number]
ASL_[ASL_ == error_flag] = np.nan
# defining time arrays
time_str = data_array[1:, 0].astype('<U32')
for r_ in range(time_str.shape[0]):
time_str[r_] = (str(data_array[r_ + 1, day_column_number]) + '-' +
str(data_array[r_ + 1, month_column_number]) + '-' +
str(data_array[r_ + 1, year_column_number]) + '_' +
str(data_array[r_ + 1, hour_column_number]) + ':' +
str(data_array[r_ + 1, minute_column_number]) + ':' +
str(data_array[r_ + 1, second_column_number]))
time_days = np.array([mdates.date2num(datetime.datetime.utcfromtimestamp(
calendar.timegm(time.strptime(time_string_record, '%d-%m-%Y_%H:%M:%S'))))
for time_string_record in time_str])
time_seconds = time_days_to_seconds(time_days)
V_ = M_.astype(float)
V_[V_ == error_flag] = np.nan
T_avr = np.ones(V_[:, 1].shape)
RH_avr = np.ones(V_[:, 1].shape)
ASL_avr = np.ones(V_[:, 1].shape)
L_T = np.zeros(V_[:, 1].shape)
L_RH = np.zeros(V_[:, 1].shape)
I_ = np.zeros(V_[:, 1].shape)
I_[:] = np.nan
# rolling average of T RH and ASL
mean_size = 7 # 5
for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
T_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 1])
RH_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 2])
ASL_avr[r_] = np.nanmean(ASL_[r_ - mean_size: r_ + mean_size])
for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
if (ASL_avr[r_ + 1] - ASL_avr[r_]) > 0:
L_T[r_] = ((T_avr[r_ + 1] - T_avr[r_]) /
(ASL_avr[r_ + 1] - ASL_avr[r_]))
L_RH[r_] = ((RH_avr[r_ + 1] - RH_avr[r_]) /
(ASL_avr[r_ + 1] - ASL_avr[r_]))
# define location of inversion
# PBL or TSI
if mode_ == 'PBL':
for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
if L_T[r_] > 7 and L_RH[r_] < -20: # PBL = 7,20 / TSI = 20,200
I_[r_] = 1
# get one of I_ only per layer
        temperature_gap = .4  # degrees, temperature drop used to delimit an inversion layer
        I_line = np.zeros((1, 3))  # columns: height, time, layer thickness
if np.nansum(I_) > 1:
r_ = -1
while r_ < I_.shape[0] - mean_size:
r_ += 1
if I_[r_] == 1 and ASL_avr[r_] < 4:
layer_temp = T_avr[r_]
layer_h = ASL_avr[r_]
layer_time = time_seconds[r_]
for rr_ in range(r_, I_.shape[0] - mean_size):
if T_avr[rr_] < layer_temp - temperature_gap:
delta_h = ASL_avr[rr_] - layer_h
altitude_ = layer_h
                            stacking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stacking_temp))
r_ = rr_
break
if np.max(I_line[:, 0]) != 0:
I_line = I_line[1:, :]
else:
I_line[:, :] = np.nan
else:
for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
if L_T[r_] > 20 and L_RH[r_] < -200: # PBL = 7,20 / TSI = 20,200
I_[r_] = 1
# get one of I_ only per layer
temperature_gap = .4 # kilometres
        I_line = np.zeros((1, 3))  # height, time, thickness (delta_h)
if np.nansum(I_) > 1:
r_ = -1
while r_ < I_.shape[0] - mean_size:
r_ += 1
if I_[r_] == 1 and 4 < ASL_avr[r_] < 8:
layer_temp = T_avr[r_]
layer_h = ASL_avr[r_]
layer_time = time_seconds[r_]
for rr_ in range(r_, I_.shape[0] - mean_size):
if T_avr[rr_] < layer_temp - temperature_gap:
delta_h = ASL_avr[rr_] - layer_h
altitude_ = layer_h
                            stacking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stacking_temp))
r_ = rr_
break
if np.max(I_line[:, 0]) != 0:
I_line = I_line[1:, :]
else:
I_line[:, :] = np.nan
return H_, V_, time_days, time_seconds, I_, I_line, L_T, L_RH
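# Usage sketch for load_sonde_data (illustrative only: 'sonde_flight.csv' is a
# hypothetical path and must point to a real sonde CSV with the expected header):
def _example_load_sonde_data():
    H_, V_, t_days, t_secs, I_, I_line, L_T, L_RH = load_sonde_data('sonde_flight.csv', mode_='PBL')
    plot_T_RH_I_(V_, I_line)  # visually check the detected inversion layers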
def plot_X1_X2_Y(X1_blue, X2_green, Y):
fig, ax1 = plt.subplots()
ax2 = ax1.twiny()
    ax1.scatter(X1_blue, Y, s=5, color='b', edgecolor='none')
ax1.axvline(0, c='k')
ax2.scatter(X2_green, Y, s=5, color='g', edgecolor='none')
ax2.axvline(0, c='k')
plt.show()
def plot_T_RH_I_(V_, I_line):
fig, ax1 = plt.subplots()
ax2 = ax1.twiny()
ASL_ = V_[:, -1]
ax1.set_ylabel('ASL')
ax1.set_xlabel('Temp')
ax2.set_xlabel('RH')
ax1.scatter(V_[:, 1], ASL_, s=5, color='b', edgecolor='none')
ax1.axvline(0, c='k')
    RH_temp = V_[:, 2]
ax2.scatter(RH_temp, ASL_, s=5, color='g', edgecolor='none')
ax2.axvline(0, c='k')
for x in range(I_line.shape[0]):
plt.axhline(I_line[x, 0], c='r')
plt.show()
def plot_ThetaVirtual_I_(V_, I_line):
fig, ax1 = plt.subplots()
ASL_ = V_[:, -1]
ax1.set_ylabel('ASL')
ax1.set_xlabel('Virtual Potential Temperature [K]')
ax1.scatter(V_[:, 5], ASL_, s=5, color='b', edgecolor='none')
for x in range(I_line.shape[0]):
plt.axhline(I_line[x, 0], c='r')
plt.show()
def last_lat_lon_alt_ozonesonde(filename_):
data_array = genfromtxt(filename_, delimiter=',', dtype='<U32', skip_header=23)
return data_array[-1,31], data_array[-1,32], data_array[-1,33], data_array[-1,0]
def load_khancoban_sondes(filename_):
line_number = -1
dict_ = {}
dict_['filename'] = filename_.split('\\')[-1]
dict_['date'] = '20' + filename_.split('\\')[-1][2:]
profile_header = []
profile_units = []
profile_data = []
with open(filename_) as file_object:
for line in file_object:
line_number += 1
line_items = line.split()
if 17 <= line_number <= 35:
profile_header.append(line_items[0])
profile_units.append(line_items[1])
if line_number >= 39 and len(line_items)>1:
profile_data.append(line_items)
profile_array = np.zeros((len(profile_data), len(profile_data[0])), dtype=float)
for r_ in range(len(profile_data)):
profile_array[r_, :] = profile_data[r_]
for c_ in range(len(profile_header)):
dict_[profile_header[c_]] = {}
dict_[profile_header[c_]]['data'] = profile_array[:, c_]
dict_[profile_header[c_]]['units'] = profile_units[c_]
return dict_
def convert_khan_sonde_data_to_skewt_dict(khan_dict, sonde_name):
# create time array in seconds since epoc
date_seconds = time_str_to_seconds(khan_dict[sonde_name]['date'], '%Y%m%d.0%H')
time_sonde_sec = date_seconds + khan_dict[sonde_name]['time']['data']
mydata_0=dict(zip(('hght','pres','temp','dwpt', 'sknt', 'drct', 'relh', 'time', 'lati', 'long'),
(khan_dict[sonde_name]['Height']['data'],
khan_dict[sonde_name]['P']['data'],
kelvin_to_celsius(khan_dict[sonde_name]['T']['data']),
kelvin_to_celsius(khan_dict[sonde_name]['TD']['data']),
ws_ms_to_knots(khan_dict[sonde_name]['FF']['data']),
khan_dict[sonde_name]['DD']['data'],
khan_dict[sonde_name]['RH']['data'],
time_sonde_sec,
khan_dict[sonde_name]['Lat']['data'],
khan_dict[sonde_name]['Lon']['data']
)))
return mydata_0
# data averaging
def average_all_data_files(filename_, number_of_seconds, WD_index = None, WS_index = None,
min_data_number=None, cumulative_parameter_list=None):
header_, values_ = load_time_columns(filename_)
time_sec = time_days_to_seconds(values_[:,0])
    # wind treatment
if WD_index is not None and WS_index is not None:
print('wind averaging underway for parameters: ' + header_[WD_index] + ' and ' + header_[WS_index])
# converting wind parameters to cartesian
WD_ = values_[:,WD_index]
WS_ = values_[:,WS_index]
North_, East_ = polar_to_cart(WD_, WS_)
values_[:,WD_index] = North_
values_[:,WS_index] = East_
# averaging
if min_data_number is None: min_data_number = int(number_of_seconds/60 * .75)
if cumulative_parameter_list is None:
Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
time_sec[0], min_data = min_data_number,
cumulative_parameter_indx= None)
else:
Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
time_sec[0], min_data = min_data_number,
cumulative_parameter_indx=np.array(cumulative_parameter_list) - 2)
if WD_index is not None and WS_index is not None:
# converting wind parameters to polar
North_ = Values_mean[:,WD_index - 2]
East_ = Values_mean[:,WS_index - 2]
WD_, WS_ = cart_to_polar(North_, East_)
Values_mean[:,WD_index - 2] = WD_
Values_mean[:,WS_index - 2] = WS_
output_filename = filename_.split('.')[0]
output_filename += '_' + str(int(number_of_seconds/60)) + '_minute_mean' + '.csv'
save_array_to_disk(header_[2:], Index_mean, Values_mean, output_filename)
print('Done!')
print('saved at: ' + output_filename)
def median_discrete(Index_, Values_, avr_size, first_index, min_data=1, position_=0.0):
# Index_: n by 1 numpy array to look for position,
# Values_: n by m numpy array, values to be averaged
# avr_size in same units as Index_,
# first_index is the first discrete index on new arrays.
# min_data is minimum amount of data for average to be made (optional, default = 1)
# position_ will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
# this will average values from Values_ that are between Index_[n:n+avr_size)
# will return: Index_averaged, Values_averaged
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
return None, None
if Index_[-1] < first_index:
return None, None
if min_data < 1:
return None, None
# initialize averaged matrices
final_index = np.nanmax(Index_)
total_averaged_rows = int((final_index-first_index)/avr_size) + 1
if len(Values_.shape) == 1:
Values_median = np.zeros(total_averaged_rows)
Values_median[:] = np.nan
else:
Values_median = np.zeros((total_averaged_rows,Values_.shape[1]))
Values_median[:,:] = np.nan
Index_averaged = np.zeros(total_averaged_rows)
for r_ in range(total_averaged_rows):
Index_averaged[r_] = first_index + (r_ * avr_size)
Index_averaged -= (position_ * avr_size)
Values_25pr = np.array(Values_median)
Values_75pr = np.array(Values_median)
Std_ = np.array(Values_median)
indx_avr_r = -1
last_raw_r = 0
r_raw_a = 0
r_raw_b = 1
while indx_avr_r <= total_averaged_rows-2:
indx_avr_r += 1
indx_a = Index_averaged[indx_avr_r]
indx_b = Index_averaged[indx_avr_r] + avr_size
stamp_population = 0
for r_raw in range(last_raw_r,Index_.shape[0]):
if indx_a <= Index_[r_raw] < indx_b:
if stamp_population == 0: r_raw_a = r_raw
r_raw_b = r_raw + 1
stamp_population += 1
if Index_[r_raw] >= indx_b:
last_raw_r = r_raw
break
if stamp_population >= min_data:
if len(Values_.shape) == 1:
Values_median[indx_avr_r] = np.nanmedian(Values_[r_raw_a:r_raw_b])
                Values_25pr[indx_avr_r] = np.nanpercentile(Values_[r_raw_a:r_raw_b], 25)
                Values_75pr[indx_avr_r] = np.nanpercentile(Values_[r_raw_a:r_raw_b], 75)
Std_[indx_avr_r] = np.nanstd(Values_[r_raw_a:r_raw_b])
else:
for c_ in range(Values_.shape[1]):
Values_median[indx_avr_r,c_] = np.nanmedian(Values_[r_raw_a:r_raw_b,c_])
Values_25pr[indx_avr_r,c_] = np.nanpercentile(Values_[r_raw_a:r_raw_b,c_],25)
Values_75pr[indx_avr_r,c_] = np.nanpercentile(Values_[r_raw_a:r_raw_b,c_],75)
                    Std_[indx_avr_r, c_] = np.nanstd(Values_[r_raw_a:r_raw_b, c_])
Index_averaged = Index_averaged + (position_ * avr_size)
return Index_averaged,Values_median,Values_25pr,Values_75pr, Std_
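# Minimal sketch of median_discrete: a synthetic 1 Hz series binned into 60 s
# medians with 25th/75th percentiles (all values below are made up):
def _example_median_discrete():
    t_sec = np.arange(0, 600, dtype=float)                      # 10 minutes at 1 Hz
    v_ = np.sin(t_sec / 30) + np.random.rand(t_sec.shape[0])    # synthetic signal
    idx_, med_, p25_, p75_, std_ = median_discrete(t_sec, v_, 60, t_sec[0], min_data=30)
    return idx_, med_, p25_, p75_, std_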
def mean_discrete(Index_, Values_, avr_size, first_index,
min_data=1, position_=0., cumulative_parameter_indx=None, last_index=None, show_progress=True):
"""
this will average values from Values_ that are between Index_[n:n+avr_size)
:param Index_: n by 1 numpy array to look for position,
:param Values_: n by m numpy array, values to be averaged
:param avr_size: in same units as Index_
:param first_index: is the first discrete index on new arrays.
:param min_data: is minimum amount of data for average to be made (optional, default = 1)
:param position_: will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
    :param cumulative_parameter_indx: in case there is any column in Values_ to be summed, not averaged. Must be a list
:param last_index: in case you want to force the returned series to some fixed period/length
:return: Index_averaged, Values_averaged
"""
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
print('error during shape check! Index_.shape[0] != Values_.shape[0]')
return None, None
if Index_[-1] < first_index:
print('error during shape check! Index_[-1] < first_index')
return None, None
if min_data < 1:
print('error during shape check! min_data < 1')
return None, None
# initialize averaged matrices
if last_index is None:
final_index = np.nanmax(Index_)
else:
final_index = last_index
total_averaged_rows = int((final_index-first_index)/avr_size) + 1
if len(Values_.shape) == 1:
Values_mean = np.zeros(total_averaged_rows)
Values_mean[:] = np.nan
else:
Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
Values_mean[:,:] = np.nan
Index_averaged = np.zeros(total_averaged_rows)
for r_ in range(total_averaged_rows):
Index_averaged[r_] = first_index + (r_ * avr_size)
Index_averaged -= (position_ * avr_size)
indx_avr_r = -1
last_raw_r = 0
r_raw_a = 0
r_raw_b = 1
while indx_avr_r <= total_averaged_rows-2:
if show_progress: p_progress_bar(indx_avr_r, total_averaged_rows-2, extra_text='averaged')
indx_avr_r += 1
indx_a = Index_averaged[indx_avr_r]
indx_b = Index_averaged[indx_avr_r] + avr_size
stamp_population = 0
for r_raw in range(last_raw_r,Index_.shape[0]):
if indx_a <= Index_[r_raw] < indx_b:
if stamp_population == 0: r_raw_a = r_raw
r_raw_b = r_raw + 1
stamp_population += 1
if Index_[r_raw] >= indx_b:
last_raw_r = r_raw
break
if stamp_population >= min_data:
if len(Values_.shape) == 1:
if cumulative_parameter_indx is not None:
Values_mean[indx_avr_r] = np.nansum(Values_[r_raw_a:r_raw_b])
else:
Values_mean[indx_avr_r] = np.nanmean(Values_[r_raw_a:r_raw_b])
else:
for c_ in range(Values_.shape[1]):
if cumulative_parameter_indx is not None:
if c_ in cumulative_parameter_indx:
Values_mean[indx_avr_r, c_] = np.nansum(Values_[r_raw_a:r_raw_b, c_])
else:
Values_mean[indx_avr_r, c_] = np.nanmean(Values_[r_raw_a:r_raw_b, c_])
else:
Values_mean[indx_avr_r,c_] = np.nanmean(Values_[r_raw_a:r_raw_b,c_])
Index_averaged = Index_averaged + (position_ * avr_size)
return Index_averaged,Values_mean
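# Minimal sketch of mean_discrete: a synthetic 1-minute series averaged to
# 10-minute stamps, treating column 1 as cumulative (summed instead of averaged).
# All values below are synthetic.
def _example_mean_discrete():
    t_sec = np.arange(0, 3600, 60, dtype=float)         # one hour of 1-minute stamps
    temp_ = 20 + np.random.rand(t_sec.shape[0])         # column 0: state variable
    rain_ = np.random.rand(t_sec.shape[0])              # column 1: accumulation
    vals_ = np.column_stack((temp_, rain_))
    idx_, avr_ = mean_discrete(t_sec, vals_, 600, t_sec[0], min_data=5,
                               cumulative_parameter_indx=[1], show_progress=False)
    return idx_, avr_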
def mean_discrete_std(Index_, Values_, avr_size, first_index, min_data=1, position_=0.):
# Index_: n by 1 numpy array to look for position,
# Values_: n by m numpy array, values to be averaged
# avr_size in same units as Index_,
# first_index is the first discrete index on new arrays.
# min_data is minimum amount of data for average to be made (optional, default = 1)
# position_ will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
# this will average values from Values_ that are between Index_[n:n+avr_size)
# will return: Index_averaged, Values_averaged
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
return None, None
if Index_[-1] < first_index:
return None, None
if min_data < 1:
return None, None
# initialize averaged matrices
final_index = np.nanmax(Index_)
total_averaged_rows = int((final_index-first_index)/avr_size) + 1
if len(Values_.shape) == 1:
Values_mean = np.zeros(total_averaged_rows)
Values_mean[:] = np.nan
else:
Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
Values_mean[:,:] = np.nan
Index_averaged = np.zeros(total_averaged_rows)
for r_ in range(total_averaged_rows):
Index_averaged[r_] = first_index + (r_ * avr_size)
Index_averaged -= (position_ * avr_size)
Std_ = np.array(Values_mean)
indx_avr_r = -1
last_raw_r = 0
r_raw_a = 0
r_raw_b = 1
while indx_avr_r <= total_averaged_rows-2:
indx_avr_r += 1
indx_a = Index_averaged[indx_avr_r]
indx_b = Index_averaged[indx_avr_r] + avr_size
stamp_population = 0
for r_raw in range(last_raw_r,Index_.shape[0]):
if indx_a <= Index_[r_raw] < indx_b:
if stamp_population == 0: r_raw_a = r_raw
r_raw_b = r_raw + 1
stamp_population += 1
if Index_[r_raw] >= indx_b:
last_raw_r = r_raw
break
if stamp_population >= min_data:
if len(Values_.shape) == 1:
Values_mean[indx_avr_r] = np.nanmean(Values_[r_raw_a:r_raw_b])
Std_[indx_avr_r] = np.nanstd(Values_[r_raw_a:r_raw_b])
else:
for c_ in range(Values_.shape[1]):
Values_mean[indx_avr_r,c_] = np.nanmean(Values_[r_raw_a:r_raw_b,c_])
                    Std_[indx_avr_r, c_] = np.nanstd(Values_[r_raw_a:r_raw_b, c_])
Index_averaged = Index_averaged + (position_ * avr_size)
return Index_averaged,Values_mean,Std_
def sum_discrete_3D_array(Index_, array_3D, sum_size, first_index, min_data=1, position_=0.):
# Index_: n by 1 numpy array to look for position,
# Values_: n by m numpy array, values to be averaged
# avr_size in same units as Index_,
# first_index is the first discrete index on new arrays.
# min_data is minimum amount of data for average to be made (optional, default = 1)
# position_ will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
# this will average values from Values_ that are between Index_[n:n+avr_size)
# will return: Index_averaged, Values_averaged
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
print('Error, index must always be ascending')
return None, None
# error checking!
if Index_.shape[0] != array_3D.shape[0]:
print('Error, axes 0 of 3D array must be equal to Index size')
return None, None
if Index_[-1] < first_index:
        print('Error, last index is smaller than first_index')
return None, None
# initialize averaged matrices
final_index = np.nanmax(Index_)
total_summed_rows = int((final_index-first_index)/sum_size) + 1
Values_sum = np.zeros((total_summed_rows, array_3D.shape[1], array_3D.shape[2]))
Values_sum[:,:,:] = np.nan
Index_summed = np.zeros(total_summed_rows)
for r_ in range(total_summed_rows):
Index_summed[r_] = first_index + (r_ * sum_size)
Index_summed -= (position_ * sum_size)
indx_sum_r = -1
last_raw_r = 0
r_raw_a = 0
r_raw_b = 1
while indx_sum_r <= total_summed_rows-2:
indx_sum_r += 1
indx_a = Index_summed[indx_sum_r]
indx_b = Index_summed[indx_sum_r] + sum_size
stamp_population = 0
for r_raw in range(last_raw_r,Index_.shape[0]):
if indx_a <= Index_[r_raw] < indx_b:
if stamp_population == 0: r_raw_a = r_raw
r_raw_b = r_raw + 1
stamp_population += 1
if Index_[r_raw] >= indx_b:
last_raw_r = r_raw
break
if stamp_population >= min_data:
Values_sum[indx_sum_r,:,:] = np.nansum(array_3D[r_raw_a:r_raw_b,:,:],axis=0)
Index_summed = Index_summed + (position_ * sum_size)
return Index_summed,Values_sum
def row_average_rolling(arr_, average_size):
result_ = np.array(arr_) * np.nan
for r_ in range(arr_.shape[0] +1 - int(average_size)):
result_[r_] = np.nanmean(arr_[r_ : r_ + average_size])
return result_
def row_average_discrete_1D(arr_, average_size):
result_ = np.zeros(int(arr_.shape[0]/average_size)) * np.nan
for r_ in range(result_.shape[0]):
result_[r_] = np.nanmean(arr_[int(r_* average_size) : int(r_* average_size) + average_size], axis=0)
return result_
def row_average_discrete_2D(arr_, average_size):
result_ = np.zeros((int(arr_.shape[0]/average_size), arr_.shape[1])) * np.nan
for r_ in range(result_.shape[0]):
result_[r_,:] = np.nanmean(arr_[int(r_* average_size) : int(r_* average_size) + average_size], axis=0)
return result_
def row_average_discrete_3D(arr_, average_size):
result_ = np.zeros((int(arr_.shape[0]/average_size), arr_.shape[1], arr_.shape[2])) * np.nan
for r_ in range(result_.shape[0]):
result_[r_,:,:] = np.nanmean(arr_[int(r_* average_size) : int(r_* average_size) + average_size], axis=0)
return result_
def column_average_discrete_2D(arr_, average_size):
result_ = np.zeros((arr_.shape[0], int(arr_.shape[1]/average_size))) * np.nan
for c_ in range(result_.shape[1]):
result_[:, c_] = np.nanmean(arr_[:, int(c_* average_size) : int(c_* average_size) + average_size], axis=1)
return result_
def column_average_discrete_3D(arr_, average_size):
result_ = np.zeros((arr_.shape[0], int(arr_.shape[1]/average_size), arr_.shape[2])) * np.nan
for c_ in range(result_.shape[1]):
result_[:, c_,:] = np.nanmean(arr_[:, int(c_* average_size) : int(c_* average_size) + average_size,:], axis=1)
return result_
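# Quick sketch of the discrete/rolling averaging helpers on a synthetic array;
# each output element is the nanmean of average_size consecutive rows or columns:
def _example_discrete_averaging():
    arr_ = np.arange(24, dtype=float).reshape(6, 4)
    rows_avr = row_average_discrete_2D(arr_, 2)         # shape (3, 4)
    cols_avr = column_average_discrete_2D(arr_, 2)      # shape (6, 2)
    smooth_ = row_average_rolling(arr_[:, 0], 3)        # rolling mean, trailing nans
    return rows_avr, cols_avr, smooth_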
def average_all_data_files_monthly(filename_, number_of_seconds, min_data_number = None,
WD_index = None, WS_index = None, cumulative_parameter_list=None):
header_, values_ = load_time_columns(filename_)
time_sec = time_days_to_seconds(values_[:,0])
    # wind treatment
if WD_index is not None and WS_index is not None:
print('wind averaging underway for parameters: ' + header_[WD_index] + ' and ' + header_[WS_index])
# converting wind parameters to cartesian
WD_ = values_[:,WD_index]
WS_ = values_[:,WS_index]
North_, East_ = polar_to_cart(WD_, WS_)
values_[:,WD_index] = North_
values_[:,WS_index] = East_
# averaging
if min_data_number is None: min_data_number = int(number_of_seconds/60 * .75)
if cumulative_parameter_list is None:
Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
time_sec[0], min_data = min_data_number,
cumulative_parameter_indx= None)
else:
Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
time_sec[0], min_data = min_data_number,
cumulative_parameter_indx=np.array(cumulative_parameter_list) - 2)
if WD_index is not None and WS_index is not None:
# converting wind parameters to polar
North_ = Values_mean[:,WD_index - 2]
East_ = Values_mean[:,WS_index - 2]
WD_, WS_ = cart_to_polar(North_, East_)
Values_mean[:,WD_index - 2] = WD_
Values_mean[:,WS_index - 2] = WS_
output_filename = filename_.split('.')[0]
output_filename += '_' + str(int(number_of_seconds/60)) + '_minute_mean' + '.csv'
save_array_to_disk(header_[2:], Index_mean, Values_mean, output_filename)
print('Done!')
print('saved at: ' + output_filename)
def rolling_window(array_, window_size):
shape = array_.shape[:-1] + (array_.shape[-1] - window_size + 1, window_size)
strides = array_.strides + (array_.strides[-1],)
return np.lib.stride_tricks.as_strided(array_, shape=shape, strides=strides)
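# Sketch showing rolling_window for vectorized rolling statistics: each row of
# the returned (copy-free) view is one window, so reductions apply along axis 1.
def _example_rolling_window():
    series_ = np.arange(10, dtype=float)
    windows_ = rolling_window(series_, 3)               # shape (8, 3)
    return np.nanmean(windows_, axis=1)                 # rolling mean of width 3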
# wind direction related
def polar_to_cart(WD_, WS_):
WD_rad = np.radians(WD_)
North_ = WS_ * np.cos(WD_rad)
East_ = WS_ * np.sin(WD_rad)
return North_, East_
def cart_to_polar(North_, East_):
try:
WS_ = np.sqrt(North_**2 + East_**2)
WD_with_neg = np.degrees(np.arctan2(East_, North_))
mask_ = np.zeros(WD_with_neg.shape[0])
mask_[WD_with_neg < 0] = 360
WD_ = WD_with_neg + mask_
except:
WS_ = np.sqrt(North_**2 + East_**2)
WD_with_neg = np.degrees(np.arctan2(East_, North_))
mask_ = 0
if WD_with_neg < 0:
mask_ = 360
WD_ = WD_with_neg + mask_
return WD_, WS_
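# Wind-vector averaging sketch: directions must be averaged via cartesian
# components, never as raw degrees (the mean of 350 and 10 degrees should be
# 0, not 180). This is the same round trip used by average_all_data_files.
def _example_wind_vector_average(WD_deg, WS_ms):
    North_, East_ = polar_to_cart(WD_deg, WS_ms)
    WD_mean, WS_mean = cart_to_polar(np.nanmean(North_), np.nanmean(East_))
    return WD_mean, WS_mean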
# time transforms
def combine_by_index(reference_index, var_index, var_values):
"""
    finds the point in var_index that exactly matches each reference_index point; rows without an exact match are set to nan
:param reference_index: 1d array
:param var_index: 1d array of same size as var_values
:param var_values: 1d or 2d array of same size as var_index
:return: reindexed_var_values of same size as reference_index
"""
rows_ = reference_index.shape[0]
if len(var_values.shape) == 1:
reindexed_var_values = np.zeros(rows_) * np.nan
for r_ in range(rows_):
p_progress(r_, rows_)
where_ = np.where(var_index == reference_index[r_])[0]
if len(where_) > 0:
reindexed_var_values[r_] = var_values[where_[0]]
return reindexed_var_values
else:
reindexed_var_values = np.zeros((rows_, var_values.shape[1])) * np.nan
for r_ in range(rows_):
p_progress(r_, rows_)
where_ = np.where(var_index == reference_index[r_])[0]
if len(where_) > 0:
reindexed_var_values[r_, :] = var_values[where_[0], :]
return reindexed_var_values
def time_seconds_to_days(time_in_seconds):
return mdates.epoch2num(time_in_seconds)
def time_days_to_seconds(time_in_days):
return mdates.num2epoch(time_in_days)
def time_str_to_seconds(time_str, time_format):
# defining time arrays
if isinstance(time_str, str):
time_seconds = calendar.timegm(time.strptime(time_str, time_format))
else:
time_seconds = np.array([calendar.timegm(time.strptime(time_string_record, time_format))
for time_string_record in time_str])
return time_seconds
def time_seconds_to_str(time_in_seconds, time_format):
try:
x = len(time_in_seconds)
if isinstance(time_in_seconds, list):
time_in_seconds = np.array(time_in_seconds)
temp_array = np.zeros(time_in_seconds.shape[0],dtype="<U32")
for r_ in range(time_in_seconds.shape[0]):
temp_array[r_] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime(time_format)
return temp_array
except:
return datetime.datetime.utcfromtimestamp(time_in_seconds).strftime(time_format)
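# Round-trip sketch for the string/seconds/days time helpers (the format
# strings below are examples; any strftime-compatible format works):
def _example_time_round_trip():
    t_sec = time_str_to_seconds('20190301_1230', '%Y%m%d_%H%M')
    t_str = time_seconds_to_str(t_sec, '%Y-%m-%d %H:%M')
    t_days = time_seconds_to_days(t_sec)
    return t_sec, t_str, t_days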
def time_seconds_to_5C_array(time_in_seconds):
if isinstance(time_in_seconds, int):
out_array = np.zeros(5, dtype=int)
out_array[0] = datetime.datetime.utcfromtimestamp(time_in_seconds).strftime('%Y')
out_array[1] = datetime.datetime.utcfromtimestamp(time_in_seconds).strftime('%m')
out_array[2] = datetime.datetime.utcfromtimestamp(time_in_seconds).strftime('%d')
out_array[3] = datetime.datetime.utcfromtimestamp(time_in_seconds).strftime('%H')
out_array[4] = datetime.datetime.utcfromtimestamp(time_in_seconds).strftime('%M')
else:
out_array = np.zeros((time_in_seconds.shape[0], 5), dtype=int)
for r_ in range(time_in_seconds.shape[0]):
out_array[r_, 0] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime('%Y')
out_array[r_, 1] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime('%m')
out_array[r_, 2] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime('%d')
out_array[r_, 3] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime('%H')
out_array[r_, 4] = datetime.datetime.utcfromtimestamp(time_in_seconds[r_]).strftime('%M')
return out_array
def time_era5_to_seconds(time_in_era5):
time_in_era5_in_seconds = np.array(time_in_era5, dtype=float) * 60 * 60
time_format_era5 = 'hours since %Y-%m-%d %H:%M'
time_seconds_start = calendar.timegm(time.strptime('hours since 1900-01-01 00:00', time_format_era5))
time_seconds_epoc = time_in_era5_in_seconds + time_seconds_start
return time_seconds_epoc
def time_seconds_to_struct(time_in_seconds):
time_struct_list = []
for t_ in time_in_seconds:
time_struct_list.append(time.gmtime(t_))
return time_struct_list
def time_to_row_str(time_array_seconds, time_stamp_str_YYYYmmDDHHMM):
time_stamp_seconds = time_str_to_seconds(time_stamp_str_YYYYmmDDHHMM, time_format_parsivel)
row_ = np.argmin(np.abs(time_array_seconds - time_stamp_seconds))
return row_
def time_to_row_sec(time_array_seconds, time_stamp_sec):
row_ = np.argmin(np.abs(time_array_seconds - time_stamp_sec))
return row_
def time_period_to_row_tuple(time_array_seconds, time_stamp_start_stop_str_YYYYmmDDHHMM):
time_start_seconds = time_str_to_seconds(time_stamp_start_stop_str_YYYYmmDDHHMM.split('_')[0], time_format_parsivel)
time_stop_seconds = time_str_to_seconds(time_stamp_start_stop_str_YYYYmmDDHHMM.split('_')[1], time_format_parsivel)
row_1 = np.argmin(np.abs(time_array_seconds - time_start_seconds))
row_2 = np.argmin(np.abs(time_array_seconds - time_stop_seconds))
return row_1, row_2
def convert_any_time_type_to_days(time_series, print_show=False):
time_days_normal_range = [727000, 748000]
time_secs_normal_range = [646800000, 2540240000]
# check if it is a str
if isinstance(time_series, str):
# try each known str_time_format and return time_seconds_to_days()
for time_str_format in time_str_formats:
try:
time_in_secs = time_str_to_seconds(time_series, time_str_format)
return time_seconds_to_days(time_in_secs)
except:
pass
if print_show: print('could not find correct time string format! returning nan')
return np.nan
# if not str, check if it is a single number
if isinstance(time_series, float) or isinstance(time_series, int):
if time_secs_normal_range[0] < time_series < time_secs_normal_range[1]:
return time_seconds_to_days(time_series)
elif time_days_normal_range[0] < time_series < time_days_normal_range[1]:
return time_series
else:
if print_show: print('could not find correct time number correction! returning nan')
return np.nan
else:
# multiple items
# check if series of strs
try:
if isinstance(time_series[0], str):
# try each known str_time_format and return time_seconds_to_days()
for time_str_format in time_str_formats:
try:
time_in_secs = time_str_to_seconds(time_series, time_str_format)
return time_seconds_to_days(time_in_secs)
except:
pass
if print_show: print('could not find correct time string format! returning None')
return None
else:
# get max and min
time_series_min = np.nanmin(time_series)
time_series_max = np.nanmax(time_series)
if time_secs_normal_range[0] < time_series_min and time_series_max < time_secs_normal_range[1]:
return time_seconds_to_days(time_series)
elif time_days_normal_range[0] < time_series_min and time_series_max < time_days_normal_range[1]:
return time_series
else:
if print_show: print('could not find correct time number correction! returning None')
return None
except:
if print_show: print('unknown type of data, returning None')
return None
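# Sketch of convert_any_time_type_to_days with numeric input; it guesses
# between epoch seconds and matplotlib day numbers from the magnitude, and for
# strings it tries each format in the global time_str_formats list:
def _example_convert_any_time():
    from_secs = convert_any_time_type_to_days(1500000000.0)     # epoch seconds
    from_days = convert_any_time_type_to_days(736500.0)         # already day numbers
    return from_secs, from_days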
def time_rman_blist_to_seconds(rman_2D_b_array, time_format='%H:%M:%S %d/%m/%Y'):
"""
    takes byte arrays and converts to seconds
    :param rman_2D_b_array: array where each row is a time stamp and columns are a character in byte format
:param time_format: string that defines the structure of the characters in each time stamp
:return: seconds array
"""
time_str_list = []
for row_ in range(rman_2D_b_array.shape[0]):
t_str = ''
for i in rman_2D_b_array[row_]:
t_str = t_str + i.decode('UTF-8')
time_str_list.append(t_str)
time_seconds = time_str_to_seconds(time_str_list, time_format)
return time_seconds
def create_time_series_seconds(start_time_str, stop_time_str, step_size):
start_time_sec = float(time_days_to_seconds(convert_any_time_type_to_days(start_time_str)))
    stop_time_sec = float(time_days_to_seconds(convert_any_time_type_to_days(stop_time_str)))
time_list = []
t_ = start_time_sec
    while t_ < stop_time_sec:
time_list.append(t_)
t_ += step_size
return np.array(time_list)
def day_night_discrimination(hour_of_day,values_,day_hours_range_tuple_inclusive):
day_ = np.array(values_) * np.nan
night_ = np.array(values_) * np.nan
for r_ in range(values_.shape[0]):
if day_hours_range_tuple_inclusive[0] <= hour_of_day[r_] <= day_hours_range_tuple_inclusive[1]:
day_[r_,:] = values_[r_,:]
else:
night_[r_,:] = values_[r_,:]
return day_, night_
def create_time_stamp_list_between_two_times(datetime_start_str,
datetime_end_str,
time_steps_in_sec,
input_time_format='%Y%m%d%H%M',
output_list_format='%Y%m%d%H%M'):
datetime_start_sec = time_str_to_seconds(datetime_start_str, input_time_format)
datetime_end_sec = time_str_to_seconds(datetime_end_str, input_time_format)
number_of_images = (datetime_end_sec - datetime_start_sec) / time_steps_in_sec
datetime_list_str = []
for time_stamp_index in range(int(number_of_images)):
datetime_list_str.append(time_seconds_to_str(datetime_start_sec + (time_stamp_index * time_steps_in_sec),
output_list_format))
return datetime_list_str
# animation
def update_animation_img(frame_number, img_animation, ax_, frame_list, title_list):
p_progress_bar(frame_number, len(frame_list), extra_text='of video created')
try:
new_frame = frame_list[frame_number,:,:]
except:
new_frame = frame_list[frame_number]
img_animation.set_data(new_frame)
ax_.set_title(str(title_list[frame_number]))
# ax_.set_xlabel(title_list[frame_number])
return img_animation
def update_animation_img_pcolormesh(frame_number, img_animation, ax_, frame_list, title_list):
p_progress(frame_number, len(frame_list), extra_text='of video created')
try:
new_frame = frame_list[frame_number,:,:]
except:
new_frame = frame_list[frame_number]
img_animation.set_array(new_frame.ravel())
ax_.set_title(str(title_list[frame_number]))
return img_animation
def update_animation_img_img_list(frame_number, img_animation, ax_, frame_list, title_list):
p_progress(frame_number, len(frame_list), extra_text='of video created')
new_frame = frame_list[frame_number]
img_animation.set_data(new_frame)
ax_.set_title(str(title_list[frame_number]))
# ax_.set_xlabel(title_list[frame_number])
return img_animation
def update_animation_img_scatter_list(frame_number, img_plot, sca_plot, ax_img,
frame_list, scatter_array_x, scatter_array_y, title_list):
p_progress(frame_number, len(frame_list), extra_text='of video created')
new_frame_img = frame_list[frame_number]
new_frame_sca_x = scatter_array_x[:frame_number]
new_frame_sca_y = scatter_array_y[:frame_number]
img_plot.set_data(new_frame_img)
sca_plot.set_data(new_frame_sca_x, new_frame_sca_y)
ax_img.set_title(str(title_list[frame_number]))
# ax_.set_xlabel(title_list[frame_number])
return img_plot
def animate_parsivel(frame_number, t_list, size_array, speed_array, spectrum_array_color, cmap_parsivel, img_plot, ax):
img_plot.remove()
img_plot = ax.pcolormesh(size_array, speed_array, spectrum_array_color[frame_number, :, :],
cmap=cmap_parsivel, vmin=0, vmax=8)
ax.set_title(str(t_list[frame_number]))
return img_plot
def create_video_animation_from_array_list(array_list, out_filename, colormap_=default_cm, extend_='', interval_=50,
dpi_=200, show_=False, save_=True, cbar_label='', title_list=None):
fig, ax_ = plt.subplots()
min_ = np.nanmin(array_list)
max_ = np.nanmax(array_list)
if title_list is None:
title_list_ = np.arange(len(array_list))
else:
title_list_ = title_list
if extend_=='':
img_figure = ax_.imshow(array_list[0], interpolation='none', cmap=colormap_, vmin=min_, vmax=max_)
else:
img_figure = ax_.imshow(array_list[0], interpolation='none', cmap=colormap_, vmin=min_, vmax=max_,
extent=[extend_[1], extend_[3], extend_[2], extend_[0]])
color_bar = fig.colorbar(img_figure)
color_bar.ax.set_ylabel(cbar_label)
img_animation = FuncAnimation(fig, update_animation_img, len(array_list), fargs=(img_figure, ax_, array_list, title_list_), interval=interval_)
if show_: plt.show()
if save_:
img_animation.save(out_filename, metadata={'artist':'Guido'}, dpi=dpi_)
plt.close(fig)
print('Done')
def create_video_animation_from_3D_array(array_, out_filename, colormap_=default_cm, extend_='', interval_=50, dpi_=200,
show_=False, save_=True, cbar_label='', title_list=None, format_='%.2f',
axes_off=False, show_colorbar=True, vmin_=None, vmax_=None):
fig, ax_ = plt.subplots()
if vmin_ is None: vmin_ = np.nanmin(array_)
if vmax_ is None: vmax_ = np.nanmax(array_)
if title_list is None or len(title_list) != array_.shape[0]:
title_list_ = np.arange(array_.shape[0])
else:
title_list_ = title_list
if extend_=='':
img_figure = ax_.imshow(array_[0,:,:], interpolation='none', cmap=colormap_, vmin=vmin_, vmax=vmax_)
else:
img_figure = ax_.imshow(array_[0,:,:], interpolation='none', cmap=colormap_, vmin=vmin_, vmax=vmax_,
extent=[extend_[1], extend_[3], extend_[2], extend_[0]])
if show_colorbar:
color_bar = fig.colorbar(img_figure,format=format_)
color_bar.ax.set_ylabel(cbar_label)
if axes_off: ax_.set_axis_off()
img_animation = FuncAnimation(fig, update_animation_img, array_.shape[0], fargs=(img_figure, ax_, array_, title_list_), interval=interval_)
if show_: plt.show()
if save_:
# img_animation.save(out_filename, writer='ffmpeg', codec='rawvideo')
img_animation.save(out_filename, metadata={'artist':'Guido'}, dpi=dpi_)
plt.close(fig)
print('Done')
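# Minimal sketch: render a synthetic (time, y, x) stack to a video file. Saving
# assumes a matplotlib animation writer (e.g. ffmpeg) is installed; the output
# filename is an example only.
def _example_video_from_3D_array():
    stack_ = np.random.rand(20, 50, 50)
    create_video_animation_from_3D_array(stack_, 'example_animation.mp4',
                                         interval_=100, show_=False, save_=True,
                                         cbar_label='synthetic units')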
def create_video_animation_from_img_arrays_list(array_list, out_filename, interval_=50, dpi_=200, show_=False,
save_=True, title_list=None):
fig, ax_ = plt.subplots()
if title_list is None:
title_list_ = np.arange(len(array_list))
else:
title_list_ = title_list
img_figure = ax_.imshow(array_list[0], interpolation='none')
ax_.set_axis_off()
img_animation = FuncAnimation(fig, update_animation_img_img_list, len(array_list),
fargs=(img_figure, ax_, array_list, title_list_), interval=interval_)
if show_: plt.show()
if save_:
img_animation.save(out_filename, metadata={'artist':'Guido'}, dpi=dpi_)
plt.close(fig)
print('Done')
def create_video_animation_from_3D_array_pcolormesh(array_values, array_x, array_y, out_filename, colormap_=default_cm,
interval_=50, dpi_=200, show_=False, save_=True,
cbar_label='', title_list=None,format_='%.2f', axes_off=False,
show_colorbar=True, x_header='', y_header='',
custom_y_range_tuple=None, custom_x_range_tuple=None,
vmin_=None, vmax_=None):
fig, ax_ = plt.subplots()
if vmin_ is None: vmin_ = np.nanmin(array_values)
if vmax_ is None: vmax_ = np.nanmax(array_values)
if title_list is None or len(title_list) != array_values.shape[0]:
title_list_ = np.arange(array_values.shape[0])
else:
title_list_ = title_list
img_figure = ax_.pcolormesh(array_x, array_y, array_values[0,:,:], cmap=colormap_,
vmin=vmin_, vmax=vmax_)#, shading='gouraud')
ax_.set_xlabel(x_header)
ax_.set_ylabel(y_header)
if custom_y_range_tuple is not None: ax_.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax_.set_xlim(custom_x_range_tuple)
if show_colorbar:
color_bar = fig.colorbar(img_figure,format=format_)
color_bar.ax.set_ylabel(cbar_label)
if axes_off: ax_.set_axis_off()
img_animation = FuncAnimation(fig, update_animation_img_pcolormesh, frames=array_values.shape[0],
fargs=(img_figure, ax_, array_values, title_list_), interval=interval_)
if show_: plt.show()
if save_:
# img_animation.save(out_filename, writer='ffmpeg', codec='rawvideo')
img_animation.save(out_filename, metadata={'artist':'Guido'}, dpi=dpi_)
plt.close(fig)
print('Done')
# display / plotting
def p_plot(X_series,Y_,
S_=5, c_='', label_=None,
x_header=None,y_header=None, t_line=False, grid_=False, cus_loc =None, cmap_=default_cm,
custom_y_range_tuple=None, custom_x_range_tuple=None, figsize_ = (10,6), save_fig=False, figure_filename='',
custom_x_ticks_start_end_step=None, custom_y_ticks_start_end_step=None, extra_text='', title_str = '',
time_format_=None, x_as_time=True, c_header=None, add_line=False, linewidth_=2, fig_ax=None,
line_color='black', invert_y=False, invert_x=False, log_x=False,log_y=False, transparent_=True,
density_=False, t_line_1_1 = True, t_line_color = 'r', fit_function=None, show_cbar=False,
text_box_str=None, text_box_loc=None, skewt=False, filled_arr=None,
legend_show=False, legend_loc='upper left'):
if fig_ax is not None:
fig, ax = fig_ax
else:
if skewt:
fig = plt.figure(figsize=figsize_)
ax = fig.add_subplot(111, projection='skewx')
else:
fig, ax = plt.subplots(figsize=figsize_)
    x_is_time_confirmed = True
    if x_as_time==True and density_==False and invert_x==False and log_x==False:
        X_ = convert_any_time_type_to_days(X_series)
        if X_ is None:
            X_ = X_series
            x_is_time_confirmed = False
    else:
        X_ = X_series
        x_is_time_confirmed = False
if skewt:
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
if c_ == '': c_ = 'black'
ax.semilogy(X_, Y_, color=c_)
# Disables the log-formatting that comes with semilogy
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_minor_formatter(NullFormatter())
ax.set_yticks(np.linspace(100, 1000, 10))
ax.set_ylim(1050, 100)
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.set_xlim(-50, 50)
x_as_time = False
ax.grid(True)
else:
if density_:
ax = p_density_scatter(X_, Y_, s = S_, fig_ax=[fig, ax], cmap_=cmap_, show_cbar=show_cbar)
else:
if c_=='':
if add_line:
ax.scatter(X_, Y_, s=S_, lw=0, c='black')
ax.plot(X_, Y_, c=line_color, linewidth=linewidth_, label=label_)
if filled_arr is not None:
ax.fill_between(X_, Y_, filled_arr, facecolor=line_color, interpolate=True)
else:
ax.scatter(X_, Y_, s=S_, lw=0, c='black', label=label_)
elif type(c_) == str:
if add_line:
ax.plot(X_, Y_, c=c_, linewidth=linewidth_, label=label_)
ax.scatter(X_, Y_, s=S_, lw=0, c=c_)
if filled_arr is not None:
ax.fill_between(X_, Y_, filled_arr, facecolor=line_color, interpolate=True)
else:
ax.scatter(X_, Y_, s=S_, lw=0, c=c_, label=label_)
else:
im = ax.scatter(X_,Y_, s = S_, lw = 0, c = c_, cmap = cmap_)
color_bar = fig.colorbar(im,fraction=0.046, pad=0.04)
if c_header is not None: color_bar.ax.set_ylabel(c_header)
if x_header is not None: ax.set_xlabel(x_header)
if y_header is not None: ax.set_ylabel(y_header)
# ax.yaxis.set_ticks(np.arange(180, 541, 45))
if grid_:
ax.grid(True)
if t_line:
Rsqr = plot_trend_line(ax, X_, Y_, c=t_line_color, alpha=1, cus_loc = cus_loc,
extra_text=extra_text, t_line_1_1= t_line_1_1, fit_function=fit_function)
else:
Rsqr = None
if invert_y:
ax.invert_yaxis()
if invert_x:
ax.invert_xaxis()
if log_x:
        ax.set_xscale("log")
if log_y:
        ax.set_yscale("log")
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None:
        if x_as_time == True and density_ == False and invert_x == False and log_x == False and x_is_time_confirmed == True:
r_1 = convert_any_time_type_to_days(custom_x_range_tuple[0])
r_2 = convert_any_time_type_to_days(custom_x_range_tuple[1])
ax.set_xlim((r_1,r_2))
else:
ax.set_xlim(custom_x_range_tuple)
if custom_x_ticks_start_end_step is not None:
ax.xaxis.set_ticks(np.arange(custom_x_ticks_start_end_step[0], custom_x_ticks_start_end_step[1],
custom_x_ticks_start_end_step[2]))
if custom_y_ticks_start_end_step is not None:
ax.yaxis.set_ticks(np.arange(custom_y_ticks_start_end_step[0], custom_y_ticks_start_end_step[1],
custom_y_ticks_start_end_step[2]))
    if x_as_time==True and density_==False and invert_x==False and log_x==False and x_is_time_confirmed==True:
if time_format_ is None:
plot_format_mayor = mdates.DateFormatter(time_format_mod)
else:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax.xaxis.set_major_formatter(plot_format_mayor)
if legend_show:
ax.legend(loc=legend_loc)
ax.set_title(title_str)
if text_box_str is not None:
if text_box_loc is None:
x_1 = ax.axis()[0]
y_2 = ax.axis()[3]
text_color = 'black'
ax.text(x_1, y_2 , str(text_box_str),
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
x_1 = text_box_loc[0]
y_2 = text_box_loc[1]
text_color = 'black'
ax.text(x_1, y_2 , str(text_box_str),
horizontalalignment='left',verticalalignment='top',color=text_color)
if save_fig or figure_filename != '':
if figure_filename == '':
name_ = str(calendar.timegm(time.gmtime()))[:-2]
fig.savefig(path_output + 'image_' + name_ + '.png', transparent=True, bbox_inches='tight')
else:
fig.savefig(figure_filename, transparent=transparent_, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return fig, ax, Rsqr
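# Usage sketch for p_plot: a synthetic hourly series plotted against time
# (epoch seconds are auto-detected and formatted as dates). Data are synthetic.
def _example_p_plot():
    t_sec = 1551398400.0 + np.arange(24) * 3600         # hourly stamps, example day
    y_ = np.random.rand(t_sec.shape[0])
    fig, ax, r_sqr = p_plot(t_sec, y_, x_header='time', y_header='synthetic',
                            add_line=True, grid_=True)
    return fig, ax, r_sqr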
def p_(header_):
# parameters list
print('-' * 20)
print('Parameters: ')
parameter_list = ''
fill_len = len(str(len(header_)))
for i, parameter_ in enumerate(header_):
parameter_list = str(parameter_list) + str(i).rjust(fill_len) + " ---> " + str(parameter_) + '\n'
print(parameter_list)
print('-' * 20)
def p_progress(current_count, total_count, display_each_percent=10, extra_text='done'):
if total_count <= display_each_percent:
if total_count > 0:
print(int(100 * current_count / total_count), '%', extra_text)
else:
total_count_corrected = int(total_count / display_each_percent) * display_each_percent
if display_each_percent * current_count / total_count_corrected % 1 == 0:
if 0 < int(100 * current_count / total_count_corrected) <= 100:
print(int(100 * current_count / total_count_corrected), '%', extra_text)
def p_progress_bar(current_count, total_count, extra_text='done'):
display_each_percent = 5
units_ = int(100 / display_each_percent)
if current_count == 0:
print('|' + ' ' * units_ + '| %', extra_text, end="", flush=True)
if current_count == total_count -1:
print('\r', end='')
print('|' + '-' * units_ + '| %', extra_text + '!finished!')
else:
if total_count <= units_:
if total_count > 0:
print('\r', end='')
print('|', end="", flush=True)
str_ = '-' * current_count
str_ = str_ + ' ' * (units_ - current_count)
print(str_, end="", flush=True)
print('| % ', extra_text, end="", flush=True)
else:
percentage_ = int((current_count / total_count) * 100)
if percentage_ / display_each_percent % 1 == 0:
if 0 < percentage_ <= 100:
print('\r', end='')
print('|', end="", flush=True)
str_ = '-' * int(percentage_ / display_each_percent)
str_ = str_ + ' ' * (units_ - int(percentage_ / display_each_percent))
print(str_, end="", flush=True)
print('| % ', extra_text, end="", flush=True)
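# Sketch of the progress-bar helper inside a processing loop:
def _example_progress_loop(n_iterations=200):
    for i_ in range(n_iterations):
        p_progress_bar(i_, n_iterations, extra_text='processed')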
def p_hist(data_, figsize_ = (10,6), fig_ax=None, title_str='', x_header=None, y_header=None, x_bins=None):
if len(data_.shape) > 1:
data_display = data_.flatten()
else:
data_display = data_
if fig_ax is not None:
fig, ax = fig_ax
else:
fig, ax = plt.subplots(figsize=figsize_)
ax.hist(data_display[~np.isnan(data_display)],x_bins)
ax.set_title(title_str)
if x_header is not None: ax.set_xlabel(x_header)
if y_header is not None: ax.set_ylabel(y_header)
return fig, ax
def get_chart_range(ax):
x_1 = ax.axis()[0]
x_2 = ax.axis()[1]
y_1 = ax.axis()[2]
y_2 = ax.axis()[3]
return x_1,x_2,y_1,y_2
def p_arr_vectorized(A_, cmap_=default_cm, figsize_= (10,6), vmin_=None,vmax_=None, cbar_label = ''):
fig, ax = plt.subplots(figsize=figsize_)
if vmin_ is None: vmin_ = np.nanmin(A_)
if vmax_ is None: vmax_ = np.nanmax(A_)
y_, x_ = np.mgrid[0:A_.shape[0], 0:A_.shape[1]]
surf_ = ax.pcolormesh(x_, y_, A_, cmap=cmap_, vmin=vmin_, vmax=vmax_)
color_bar = fig.colorbar(surf_)
color_bar.ax.set_ylabel(cbar_label)
return fig, ax
def p_arr_vectorized_2(array_v, array_x, array_y,custom_y_range_tuple=None, custom_x_range_tuple=None,
x_header='', y_header='', cbar_label = '', title_str='',
cmap_=default_cm, figsize_= (10,6), vmin_=None,vmax_=None,
figure_filename = None, time_format_ = None):
fig, ax = plt.subplots(figsize=figsize_)
# plt.close(fig)
if vmin_ is None: vmin_ = np.nanmin(array_v)
if vmax_ is None: vmax_ = np.nanmax(array_v)
if len(array_x.shape) == 1:
array_y_reshaped = np.zeros((array_v.shape[0], array_v.shape[1]), dtype=float)
array_x_reshaped = np.zeros((array_v.shape[0], array_v.shape[1]), dtype=float)
for r_ in range(array_v.shape[0]):
array_y_reshaped[r_, :] = array_y
for c_ in range(array_v.shape[1]):
array_x_reshaped[:, c_] = array_x
else:
array_y_reshaped = array_y
array_x_reshaped = array_x
surf_ = ax.pcolormesh(array_x_reshaped, array_y_reshaped, array_v, cmap=cmap_, vmin=vmin_, vmax=vmax_)
color_bar = fig.colorbar(surf_)
color_bar.ax.set_ylabel(cbar_label)
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
ax.set_title(title_str)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if time_format_ is not None:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax.xaxis.set_major_formatter(plot_format_mayor)
if figure_filename is not None:
fig.savefig(figure_filename, transparent=True, bbox_inches='tight')
plt.close(fig)
return
return fig, ax
def p_arr_vectorized_3(array_v, array_x, array_y,
custom_y_range_tuple=None, custom_x_range_tuple=None,
custom_ticks_x=None, custom_ticks_y=None,
x_header='', y_header='', cbar_label = '', title_str='', contour_=False, contourF_=False,
cmap_=default_cm, figsize_= (10,6), vmin_=None, vmax_=None, show_cbar=True, cbar_format='%.2f',
figure_filename = None, grid_=False, time_format_ = None, fig_ax=None,
colorbar_tick_labels_list=None, show_x_ticks=True, show_y_ticks=True,cbar_ax=None,
invert_y=False, invert_x=False, levels=None, text_box_str=None,text_box_loc=None):
if fig_ax is not None:
fig, ax = fig_ax
else:
fig, ax = plt.subplots(figsize=figsize_)
if vmin_ is None: vmin_ = np.nanmin(array_v)
if vmax_ is None: vmax_ = np.nanmax(array_v)
if len(array_x.shape) == 1:
array_x_reshaped = np.zeros((array_v.shape[0], array_v.shape[1]), dtype=float)
for c_ in range(array_v.shape[1]):
array_x_reshaped[:, c_] = array_x
else:
array_x_reshaped = array_x
array_x = array_x_reshaped
if len(array_y.shape) == 1:
array_y_reshaped = np.zeros((array_v.shape[0], array_v.shape[1]), dtype=float)
for r_ in range(array_v.shape[0]):
array_y_reshaped[r_, :] = array_y
else:
array_y_reshaped = array_y
array_y = array_y_reshaped
if time_format_ is not None:
array_x = convert_any_time_type_to_days(array_x_reshaped)
if contour_:
surf_ = ax.contour(array_x, array_y, array_v, levels=levels, cmap=cmap_, vmin=vmin_, vmax=vmax_)
elif contourF_:
surf_ = ax.contourf(array_x, array_y, array_v, levels=levels, cmap=cmap_, vmin=vmin_, vmax=vmax_)
else:
surf_ = ax.pcolormesh(array_x, array_y, array_v, cmap=cmap_, vmin=vmin_, vmax=vmax_)
if show_cbar:
if cbar_ax is None:
color_bar = fig.colorbar(surf_, format=cbar_format)
else:
color_bar = fig.colorbar(surf_, format=cbar_format, cax=cbar_ax)
color_bar.ax.set_ylabel(cbar_label)
if colorbar_tick_labels_list is not None:
ticks_ = np.linspace(0.5, len(colorbar_tick_labels_list) - 0.5, len(colorbar_tick_labels_list))
color_bar.set_ticks(ticks_)
color_bar.set_ticklabels(colorbar_tick_labels_list)
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
ax.set_title(title_str)
ax.grid(grid_)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if time_format_ is not None:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax.xaxis.set_major_formatter(plot_format_mayor)
ax.format_coord = lambda x, y: 'x=%s, y=%g, v=%g' % (plot_format_mayor(x),
y,
array_v[int(np.argmin(np.abs(array_x[:, 0] - x))), int(
np.argmin(np.abs(array_y[0, :] - y)))])
else:
ax.format_coord = lambda x, y: 'x=%1.2f, y=%g, v=%g' % (x,
y,
array_v[
int(np.argmin(np.abs(array_x[:, 0] - x))), int(
np.argmin(np.abs(array_y[0, :] - y)))])
if not show_x_ticks:
plt.setp(ax.get_xticklabels(), visible=False)
if not show_y_ticks:
plt.setp(ax.get_yticklabels(), visible=False)
if invert_y:
ax.invert_yaxis()
if invert_x:
ax.invert_xaxis()
if custom_ticks_x is not None: ax.xaxis.set_ticks(custom_ticks_x)
if custom_ticks_y is not None: ax.yaxis.set_ticks(custom_ticks_y)
if text_box_str is not None:
if text_box_loc is None:
x_1 = ax.axis()[0]
y_2 = ax.axis()[3]
text_color = 'black'
ax.text(x_1, y_2 , str(text_box_str),
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
x_1 = text_box_loc[0]
y_2 = text_box_loc[1]
text_color = 'black'
ax.text(x_1, y_2 , str(text_box_str),
horizontalalignment='left',verticalalignment='top',color=text_color)
if figure_filename is not None:
fig.savefig(figure_filename, transparent=True, bbox_inches='tight')
plt.close(fig)
return
return fig, ax, surf_
def p_arr(A_, cmap_=default_cm, extend_x1_x2_y1_y2 =(0,1), figsize_= (10, 6), aspect_='auto', rot_=0, title_str = '',
vmin_=None, vmax_=None, cbar_label = '', x_as_time=False, time_format_='%H:%M %d%b%y', save_fig=False,
figure_filename='', x_header='',y_header='', x_ticks_tuple=None, y_ticks_tuple=None, fig_ax=None,
origin_='upper', colorbar_tick_labels_list=None, tick_label_format='plain', tick_offset=False):
if fig_ax is not None:
fig, ax = fig_ax
else:
fig, ax = plt.subplots(figsize=figsize_)
A_copy = np.array(A_)
if vmin_ is not None: A_copy[A_copy < vmin_] = vmin_
if vmax_ is not None: A_copy[A_copy > vmax_] = vmax_
if rot_ != 0:
A_copy = np.rot90(A_copy, rot_)
if len(extend_x1_x2_y1_y2)==2:
img_ = ax.imshow(A_copy, interpolation='none', cmap=cmap_, aspect= aspect_, vmin=vmin_, vmax=vmax_, origin=origin_)
else:
img_ = ax.imshow(A_copy, interpolation='none', cmap=cmap_, aspect= aspect_, origin=origin_, vmin=vmin_, vmax=vmax_,
extent=[extend_x1_x2_y1_y2[0], extend_x1_x2_y1_y2[1], extend_x1_x2_y1_y2[2], extend_x1_x2_y1_y2[3]])
color_bar = fig.colorbar(img_)
color_bar.ax.set_ylabel(cbar_label)
if colorbar_tick_labels_list is not None:
ticks_ = np.linspace(0.5, len(colorbar_tick_labels_list) - 0.5, len(colorbar_tick_labels_list))
color_bar.set_ticks(ticks_)
color_bar.set_ticklabels(colorbar_tick_labels_list)
if x_as_time:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax.xaxis.set_major_formatter(plot_format_mayor)
ax.set_title(title_str)
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
if x_ticks_tuple is not None:
ax.xaxis.set_ticks(np.arange(x_ticks_tuple[0], x_ticks_tuple[1], x_ticks_tuple[2]))
if y_ticks_tuple is not None:
ax.yaxis.set_ticks(np.arange(y_ticks_tuple[0], y_ticks_tuple[1], y_ticks_tuple[2]))
    ax.ticklabel_format(useOffset=tick_offset, style=tick_label_format)
plt.tight_layout()
if save_fig or figure_filename != '':
if figure_filename == '':
name_ = str(calendar.timegm(time.gmtime()))[:-2]
fig.savefig(path_output + 'image_' + name_ + '.png', transparent=True, bbox_inches='tight')
else:
fig.savefig(figure_filename, transparent=False, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return fig, ax, img_, color_bar
def p_plot_colored_lines(x_array, y_array, color_array, tick_labels_list, fig_ax=None, figsize_= (10, 6),
x_header='', y_header='', figure_filename = None, time_format='', cbar_show=True,
custom_y_range_tuple=None, custom_x_range_tuple=None, grid_=False, cbar_ax=None,
cmap = listed_cm):
# plot rain rate colored by rain type
points = np.array([x_array, y_array]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
if fig_ax is not None:
fig, ax = fig_ax
else:
fig, ax = plt.subplots(figsize=figsize_)
    # use a BoundaryNorm so the integer categories in color_array map to discrete colors
norm = BoundaryNorm(np.arange(len(tick_labels_list)+1), cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(color_array)
lc.set_linewidth(2)
line = ax.add_collection(lc)
if cbar_show:
if cbar_ax is None:
cb2 = fig.colorbar(line, ax=ax)
else:
cb2 = fig.colorbar(line, cax=cbar_ax)
ticks_ = np.linspace(0.5, len(tick_labels_list) - 0.5, len(tick_labels_list))
cb2.set_ticks(ticks_)
cb2.set_ticklabels(tick_labels_list)
# x_array = convert_any_time_type_to_days(x_array)
ax.set_xlim(x_array.min(),
x_array.max())
ax.set_ylim(y_array.min(), y_array.max())
ax.set_ylabel(y_header)
ax.set_xlabel(x_header)
ax.grid(grid_)
if time_format != '':
plot_format_mayor = mdates.DateFormatter(time_format)
ax.xaxis.set_major_formatter(plot_format_mayor)
# plt.xticks(rotation=45)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if figure_filename is not None:
fig.savefig(figure_filename , transparent=True, bbox_inches='tight')
plt.close(fig)
else:
return fig, ax
def plot_3D_scatter(x_series, y_series, z_series, label_names_tuples_xyz=tuple(''), size_ = 15, color_='b'):
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(x_series, y_series, z_series,s=size_,c=color_,lw = 0)
if len(label_names_tuples_xyz) == 3:
ax.set_xlabel(label_names_tuples_xyz[0])
ax.set_ylabel(label_names_tuples_xyz[1])
ax.set_zlabel(label_names_tuples_xyz[2])
plt.show()
return fig, ax
def plot_3D_stacket_series_lines(x_z_series_list, y_series=None, y_as_time=False, time_format=time_format,
log_z=False, invert_z=False,
custom_x_range_tuple=None, custom_y_range_tuple=None, custom_z_range_tuple=None,
label_names_tuples_xyz=tuple(''), color_='b'):
fig = plt.figure()
ax = Axes3D(fig)
if y_series is None:
y_series = np.arange(len(x_z_series_list))
for t_ in range(len(x_z_series_list)):
y_ = np.ones(len(x_z_series_list[t_][0])) * y_series[t_]
ax.plot(x_z_series_list[t_][0], y_, x_z_series_list[t_][1], c=color_)
if len(label_names_tuples_xyz) == 3:
ax.set_xlabel(label_names_tuples_xyz[0])
ax.set_ylabel(label_names_tuples_xyz[1])
ax.set_zlabel(label_names_tuples_xyz[2])
if y_as_time:
plot_format_mayor = mdates.DateFormatter(time_format)
ax.yaxis.set_major_formatter(plot_format_mayor)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_z_range_tuple is not None: ax.set_zlim(custom_z_range_tuple)
if log_z:
        ax.set_zscale("log")
if invert_z:
ax.invert_zaxis()
ax.yaxis.set_ticks(y_series)
plt.show()
return fig, ax
def plot_shared_x_axis(X_Y_list, S_=5, x_header=None,y_header_list=None, t_line=False, grid_=False, cus_loc =None,
c_='', custom_y_range_tuple=None, custom_x_range_tuple=None, figsize_ = (10,6), save_fig=False,
figure_filename='',title_str = '', cmap_=default_cm, sharex=True, sharey=False,
custom_x_ticks_start_end_step=None, custom_y_ticks_start_end_step=None, rot_y_label=90,
time_format_='%H:%M %d%b%y', x_as_time=False, add_line=False, linewidth_=2,
invert_y=False, invert_x=False, log_x=False,log_y=False, transparent_=True):
fig, (ax_list) = plt.subplots(nrows=len(X_Y_list), sharex=sharex, sharey=sharey, figsize=figsize_)
if c_=='':
n = int(len(X_Y_list))
color_list = cmap_(np.linspace(0, 1, n))
for series_number in range(len(X_Y_list)):
ax_list[series_number].scatter(X_Y_list[series_number][0],X_Y_list[series_number][1],
c= color_list[series_number], s = S_, lw = 0)
if add_line:
ax_list[series_number].plot(X_Y_list[series_number][0], X_Y_list[series_number][1],
c=color_list[series_number], linewidth=linewidth_)
else:
for series_number in range(len(X_Y_list)):
ax_list[series_number].scatter(X_Y_list[series_number][0],X_Y_list[series_number][1],
s = S_, lw = 0, c = c_)
if x_header is not None: ax_list[-1].set_xlabel(x_header)
for series_number in range(len(X_Y_list)):
if y_header_list is not None:
ax_list[series_number].set_ylabel(y_header_list[series_number], rotation=rot_y_label)
if grid_:
ax_list[series_number].grid(True)
if t_line:
plot_trend_line(ax_list[series_number], X_Y_list[series_number][0],X_Y_list[series_number][1],
order=1, c='r', alpha=1, cus_loc = cus_loc)
if custom_y_range_tuple is not None: ax_list[series_number].set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax_list[series_number].set_xlim(custom_x_range_tuple)
if custom_x_ticks_start_end_step is not None:
ax_list[series_number].xaxis.set_ticks(np.arange(custom_x_ticks_start_end_step[0],
custom_x_ticks_start_end_step[1],
custom_x_ticks_start_end_step[2]))
if custom_y_ticks_start_end_step is not None:
ax_list[series_number].yaxis.set_ticks(np.arange(custom_y_ticks_start_end_step[0],
custom_y_ticks_start_end_step[1],
custom_y_ticks_start_end_step[2]))
if x_as_time:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax_list[series_number].xaxis.set_major_formatter(plot_format_mayor)
if invert_y:
ax_list[series_number].invert_yaxis()
if invert_x:
ax_list[series_number].invert_xaxis()
if log_x:
            ax_list[series_number].set_xscale("log")
if log_y:
            ax_list[series_number].set_yscale("log")
for series_number in range(len(X_Y_list)-1):
plt.setp(ax_list[series_number].get_xticklabels(), visible=False)
ax_list[0].set_title(title_str)
fig.tight_layout()
if save_fig or figure_filename != '':
if figure_filename == '':
name_ = str(calendar.timegm(time.gmtime()))[:-2]
fig.savefig(path_output + 'image_' + name_ + '.png', transparent=True, bbox_inches='tight')
else:
fig.savefig(figure_filename, transparent=transparent_, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return fig, ax_list
def plot_shared_y_axis(X_Y_list, S_=5, x_header_list=None, y_header=None, t_line=False, grid_=False, cus_loc=None,
c_='', custom_y_range_tuple=None, custom_x_range_tuple=None, figsize_=(10, 6), save_fig=False,
figure_filename='', title_str='', cmap_=default_cm, sharex=False, sharey=True,
custom_x_ticks_start_end_step=None, custom_y_ticks_start_end_step=None,
time_format_='%H:%M %d%b%y', x_as_time=False, add_line=False, linewidth_=2,
invert_y=False, invert_x=False, log_x=False, log_y=False, transparent_=True):
    fig, (ax_list) = plt.subplots(ncols=len(X_Y_list), sharex=sharex, sharey=sharey, figsize=figsize_)
if c_ == '':
n = int(len(X_Y_list))
color_list = cmap_(np.linspace(0, 1, n))
for series_number in range(len(X_Y_list)):
ax_list[series_number].scatter(X_Y_list[series_number][0], X_Y_list[series_number][1],
c=color_list[series_number], s=S_, lw=0)
if add_line:
ax_list[series_number].plot(X_Y_list[series_number][0], X_Y_list[series_number][1],
c=color_list[series_number], linewidth=linewidth_)
else:
for series_number in range(len(X_Y_list)):
ax_list[series_number].scatter(X_Y_list[series_number][0], X_Y_list[series_number][1],
s=S_, lw=0, c=c_[series_number], cmap=cmap_)
if y_header is not None: ax_list[0].set_ylabel(y_header)
for series_number in range(len(X_Y_list)):
if x_header_list is not None:
            ax_list[series_number].set_xlabel(x_header_list[series_number])
if grid_:
ax_list[series_number].grid(True)
if t_line:
plot_trend_line(ax_list[series_number], X_Y_list[series_number][0], X_Y_list[series_number][1],
order=1, c='r', alpha=1, cus_loc=cus_loc)
if custom_y_range_tuple is not None: ax_list[series_number].set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax_list[series_number].set_xlim(custom_x_range_tuple)
if custom_x_ticks_start_end_step is not None:
ax_list[series_number].xaxis.set_ticks(np.arange(custom_x_ticks_start_end_step[0],
custom_x_ticks_start_end_step[1],
custom_x_ticks_start_end_step[2]))
if custom_y_ticks_start_end_step is not None:
ax_list[series_number].yaxis.set_ticks(np.arange(custom_y_ticks_start_end_step[0],
custom_y_ticks_start_end_step[1],
custom_y_ticks_start_end_step[2]))
if x_as_time:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax_list[series_number].xaxis.set_major_formatter(plot_format_mayor)
if invert_y:
ax_list[series_number].invert_yaxis()
if invert_x:
ax_list[series_number].invert_xaxis()
if log_x:
ax_list[series_number].set_xscale("log", nonposy='clip')
if log_y:
ax_list[series_number].set_yscale("log", nonposy='clip')
for series_number in range(len(X_Y_list) - 1):
        plt.setp(ax_list[series_number+1].get_yticklabels(), visible=False)
ax_list[0].set_title(title_str)
fig.tight_layout()
if save_fig or figure_filename != '':
if figure_filename == '':
name_ = str(calendar.timegm(time.gmtime()))[:-2]
fig.savefig(path_output + 'image_' + name_ + '.png', transparent=True, bbox_inches='tight')
else:
fig.savefig(figure_filename, transparent=transparent_, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return fig, ax_list
def scatter_custom_size(X_,Y_,S_, x_header=None,y_header=None, t_line=False, grid_=False, cus_loc =None, c_='',
custom_y_range_tuple=None, custom_x_range_tuple=None, figsize_ = (10,6), save_fig=False,
custom_x_ticks_start_end_step=None, custom_y_ticks_start_end_step=None, extra_text='',
time_format_='%H:%M %d%b%y', x_as_time=False, c_header=None, add_line=False, linewidth_=2,
line_color='black'):
fig, ax = plt.subplots(figsize=figsize_)
if c_=='':
ax.scatter(X_,Y_, s = S_, lw = 0, c = 'black')
if add_line:
ax.plot(X_, Y_, c=line_color, linewidth=linewidth_)
else:
im = ax.scatter(X_,Y_, s = S_, lw = 0, c = c_, cmap = default_cm)
color_bar = fig.colorbar(im,fraction=0.046, pad=0.04)
if c_header is not None: color_bar.ax.set_ylabel(c_header)
if x_header is not None: ax.set_xlabel(x_header)
if y_header is not None: ax.set_ylabel(y_header)
# ax.yaxis.set_ticks(np.arange(180, 541, 45))
if grid_:
ax.grid(True)
if t_line:
plot_trend_line(ax, X_, Y_, order=1, c='r', alpha=1, cus_loc = cus_loc, extra_text=extra_text)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if custom_x_ticks_start_end_step is not None:
ax.xaxis.set_ticks(np.arange(custom_x_ticks_start_end_step[0], custom_x_ticks_start_end_step[1], custom_x_ticks_start_end_step[2]))
if custom_y_ticks_start_end_step is not None:
ax.yaxis.set_ticks(np.arange(custom_y_ticks_start_end_step[0], custom_y_ticks_start_end_step[1], custom_y_ticks_start_end_step[2]))
if x_as_time:
plot_format_mayor = mdates.DateFormatter(time_format_)
ax.xaxis.set_major_formatter(plot_format_mayor)
if save_fig:
name_ = str(calendar.timegm(time.gmtime()))[:-2]
fig.savefig(path_output + 'image_' + name_ + '.png',transparent=True, bbox_inches='tight')
else:
plt.show()
return fig, ax
def Display_emission_array(filename_, variable_name):
netcdf_file_object = nc.Dataset(filename_, 'r')
p_arr(netcdf_file_object.variables[variable_name][0, 0, ::-1, :])
netcdf_file_object.close()
def power_plot(X_, Y_, Size_=5, x_header='',y_header='', trend_line=False, show_line=False, lw_=2, grid_=False,
cus_loc = '', c_='', custom_y_range_tuple=None, custom_x_range_tuple=None, cbar_label = ''):
fig, ax = plt.subplots()
if c_=='':
ax.scatter(X_,Y_, s = Size_, lw = 0, c = 'black')
if show_line:
ax.plot(X_,Y_, lw = lw_, color = 'black')
else:
im = ax.scatter(X_,Y_, s = Size_, lw = 0, c = c_, cmap = default_cm)
        ax.plot(X_, Y_, lw=lw_, color='black')  # plot() needs a single color; cmap is only valid for scatter
color_bar = fig.colorbar(im,fraction=0.046, pad=0.04)
color_bar.ax.set_ylabel(cbar_label)
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
if grid_:
ax.grid(True)
if trend_line:
plot_trend_line(ax, X_, Y_, order=1, c='r', alpha=1, cus_loc = cus_loc)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
plt.show()
return fig, ax
def power_plot_with_error(X_, Y_, yerr_, Size_=5, c_='', x_header='',y_header='', trend_line=False, lw_=2, grid_=False,
cus_loc = '', custom_y_range_tuple=None, custom_x_range_tuple=None, cbar_label = ''):
fig, ax = plt.subplots()
if c_=='':
ax.scatter(X_,Y_, s = Size_, lw = 0, c = 'black')
ax.errorbar(X_,Y_, yerr=yerr_, color = 'black')
else:
im = ax.scatter(X_,Y_, s = Size_, lw = 0, c = c_, cmap = default_cm)
        ax.plot(X_, Y_, lw=lw_, color='black')  # plot() needs a single color; cmap is only valid for scatter
color_bar = fig.colorbar(im,fraction=0.046, pad=0.04)
color_bar.ax.set_ylabel(cbar_label)
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
if grid_:
ax.grid(True)
if trend_line:
plot_trend_line(ax, X_, Y_, order=1, c='r', alpha=1, cus_loc = cus_loc)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
plt.show()
def plot_preview_x_as_time(header_,days_,values_):
plot_format_mayor = mdates.DateFormatter('%H:%M %d%b%y')
fig, ax = plt.subplots()
if len(values_.shape) > 1:
for c_ in range(values_.shape[1]):
ax.plot_date(days_,values_[:,c_], markersize=2, markeredgewidth=0, label=header_[c_])
else:
ax.plot_date(days_,values_,'ko-', markersize=2, markeredgewidth=0, label=header_)
ax.xaxis.set_major_formatter(plot_format_mayor)
plt.show()
def plot_values_x_as_time(header_,values_,x_array,y_list,
legend_=False, plot_fmt_str0='%H:%M %d%b%y'):
color_list = default_cm(np.linspace(0,1,len(y_list)))
plot_format_mayor = mdates.DateFormatter(plot_fmt_str0)
fig, ax = plt.subplots()
for c_,y_ in enumerate(y_list):
color_ = color_list[c_]
ax.plot(x_array,values_[:,y_], color = color_,label=header_[y_])
ax.xaxis.set_major_formatter(plot_format_mayor)
fig.tight_layout()
if legend_: ax.legend(loc=(.95,.0))
plt.show()
def plot_trend_line(axes_, xd, yd, c='r', alpha=1, cus_loc = None, text_color='black',
extra_text='', t_line_1_1=True, fit_function=None):
"""Make a line of best fit"""
#create clean series
x_, y_ = coincidence(xd,yd)
if fit_function is not None:
params = curve_fit(fit_function, x_, y_)
print('fitted parameters')
print(params[0])
fit_line_x = np.arange(int(np.nanmin(x_)),int(np.nanmax(x_))+1,.1)
plotting_par_list = [fit_line_x]
for fit_par in params[0]:
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
axes_.plot(fit_line_x, fit_line_y, c, alpha=alpha)
# calculate R2
plotting_par_list = [x_]
params_str_ = ''
for i_, fit_par in enumerate(params[0]):
if extra_text == '':
params_str_ = params_str_ + 'fit parameters ' + str(i_+1) + ': ' + '$%0.2f$' % (fit_par) + '\n'
else:
params_str_ = params_str_ + extra_text + '$%0.2f$' % (fit_par) + '\n'
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
residuals = y_ - fit_line_y
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_ - np.mean(y_))**2)
Rsqr = float(1 - (ss_res / ss_tot))
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
error_text = '$R^2 = %0.2f$' % Rsqr
if cus_loc is None:
axes_.text(x_1, y_2 , params_str_ + error_text,
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
axes_.text(cus_loc[0], cus_loc[1] , params_str_ + error_text,
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
# Calculate trend line
coeffs = np.polyfit(x_, y_, 1)
intercept = coeffs[-1]
slope = coeffs[-2]
minxd = np.nanmin(x_)
maxxd = np.nanmax(x_)
xl = np.array([minxd, maxxd])
yl = slope * xl + intercept
# Plot trend line
axes_.plot(xl, yl, c, alpha=alpha)
# Calculate R Squared
p = np.poly1d(coeffs)
ybar = np.sum(y_) / len(y_)
ssreg = np.sum((p(x_) - ybar) ** 2)
sstot = np.sum((y_ - ybar) ** 2)
Rsqr = float(ssreg / sstot)
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
if intercept >= 0:
if extra_text=='':
equat_text = '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
if extra_text=='':
equat_text = '$Y = %0.2f*x %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x %0.2f$' % (slope,intercept)
error_text = '$R^2 = %0.2f$' % Rsqr
if cus_loc is None:
axes_.text(x_1, y_2 , equat_text + '\n' + error_text,
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
axes_.text(cus_loc[0], cus_loc[1] , equat_text + '\n' + error_text,
horizontalalignment='left',verticalalignment='top',color=text_color)
# plot 1:1 line if true
if t_line_1_1:
xy_min = np.min([np.nanmin(x_),np.nanmin(y_)])
xy_max = np.max([np.nanmax(x_),np.nanmax(y_)])
axes_.plot([xy_min, xy_max], [xy_min, xy_max], 'k--')
return Rsqr
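# Example usage of plot_trend_line (illustrative sketch, not part of the
# original API; assumes numpy/matplotlib are imported as above and that the
# coincidence() helper defined earlier in this module is available):
#   fig, ax = plt.subplots()
#   x = np.linspace(0, 10, 50)
#   y = 2.0 * x + 1.0 + np.random.normal(0, 1, 50)
#   ax.scatter(x, y, s=5, c='black')
#   r2 = plot_trend_line(ax, x, y, c='r', t_line_1_1=False)
#   plt.show()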
def color_y_axis(ax, color):
"""Color your axes."""
for t in ax.get_yticklabels():
t.set_color(color)
return None
def p_density_scatter( x_ , y_, fig_ax = None, cmap_=default_cm, sort = True, bins = 20, show_cbar=False, **kwargs ) :
"""
Scatter plot colored by 2d histogram
"""
x, y = coincidence(x_ , y_)
if fig_ax is None :
fig , ax = plt.subplots()
else:
fig = fig_ax[0]
ax = fig_ax[1]
data , x_e, y_e = np.histogram2d( x, y, bins = bins)
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data ,
np.vstack([x,y]).T , method = "splinef2d", bounds_error = False )
# Sort the points by density, so that the densest points are plotted last
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
im = ax.scatter( x, y, c=z, cmap=cmap_, lw=0, **kwargs)
if show_cbar:
color_bar = fig.colorbar(im, fraction=0.046, pad=0.04)
return ax
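# Example usage of p_density_scatter (illustrative sketch; assumes scipy's
# interpn is imported earlier in this module, since the function relies on it):
#   x = np.random.normal(0, 1, 5000)
#   y = x + np.random.normal(0, 0.5, 5000)
#   ax = p_density_scatter(x, y, bins=30, show_cbar=True, s=5)
#   plt.show()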
# diurnal variations
def diurnal_variability_boxplot(time_in_seconds, y_, fig_ax=None, x_header='Hours', y_header='',figure_filename='',
bin_size_hours=1, min_bin_population=10, start_value=0, figsize_=(10,6), title_str=''):
# convert time to struct
time_hour = np.zeros(time_in_seconds.shape[0], dtype=float)
time_mins = np.zeros(time_in_seconds.shape[0], dtype=float)
time_secs = np.zeros(time_in_seconds.shape[0], dtype=float)
for r_ in range(time_in_seconds.shape[0]):
time_hour[r_] = time.gmtime(time_in_seconds[r_])[3]
time_mins[r_] = time.gmtime(time_in_seconds[r_])[4]
time_secs[r_] = time.gmtime(time_in_seconds[r_])[5]
time_hours = time_hour + (time_mins + (time_secs/60))/60
# get coincidences only
x_val,y_val = coincidence(time_hours, y_)
# combine x and y in matrix
M_ = np.column_stack((x_val,y_val))
# always ascending to increase efficiency
M_sorted = M_[M_[:,0].argsort()] # sort by first column
M_ = M_sorted
# convert data to list of bins
y_binned = []
x_binned = []
start_bin_edge = start_value
last_row = 0
last_row_temp = last_row
while start_bin_edge <= np.nanmax(x_val):
y_val_list = []
for row_ in range(last_row, M_.shape[0]):
if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size_hours:
if M_[row_, 1] == M_[row_, 1]:
y_val_list.append(M_[row_, 1])
last_row_temp = row_
if M_[row_, 0] >= start_bin_edge + bin_size_hours:
last_row_temp = row_
break
x_binned.append(start_bin_edge + (bin_size_hours / 2))
if len(y_val_list) >= min_bin_population:
y_binned.append(y_val_list)
else:
y_binned.append([])
start_bin_edge += bin_size_hours
last_row = last_row_temp
# start figure
if fig_ax is not None:
fig, ax = fig_ax
else:
fig, ax = plt.subplots(figsize=figsize_)
# add series
if bin_size_hours >= 1:
x_binned_int = np.array(x_binned)
else:
x_binned_int = x_binned
ax.boxplot(y_binned, 0, '', whis=[5,95], positions = x_binned_int,
showmeans = True, widths =bin_size_hours * .9, manage_xticks=False)
# if user selected x axes as hour
ax.xaxis.set_ticks(np.arange(0, 24, 3))
ax.set_xlabel(x_header)
ax.set_ylabel(y_header)
ax.set_title(title_str)
if figure_filename != '':
fig.savefig(figure_filename, transparent=True, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return fig, ax, x_binned_int, y_binned
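# Example usage (illustrative sketch with synthetic data; time stamps are
# epoch seconds, as expected by time.gmtime):
#   t_sec = np.arange(0, 7 * 86400, 600)  # one week at 10-minute resolution
#   temp = 15 + 5 * np.sin(2 * np.pi * (t_sec % 86400) / 86400) \
#          + np.random.normal(0, 1, t_sec.shape[0])
#   diurnal_variability_boxplot(t_sec, temp, y_header='Temperature [C]')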
def plot_box_from_values(values_x, values_y, x_label=None, y_label=None, bin_size=1, min_bin_population=10,
fit_function = None, fit_fuction_by='mean', log_x=False,log_y=False,
custom_y_range_tuple = None, custom_x_range_tuple = None,
force_start=None, force_end=None, show_means=True,
notch=False, sym='', whis=(5,95)):
x_val_original = values_x
y_val_original = values_y
# get coincidences only
x_val,y_val = coincidence(x_val_original,y_val_original)
# start figure
fig, ax = plt.subplots(figsize=(8, 6))
# combine x and y in matrix
M_ = np.column_stack((x_val,y_val))
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(x_val.shape[0]-1):
if x_val[x]==x_val[x] and x_val[x+1]==x_val[x+1]:
if x_val[x+1] < x_val[x]:
always_ascending = 0
if always_ascending == 0:
M_sorted = M_[M_[:,0].argsort()] # sort by first column
M_ = M_sorted
# convert data to list of bins
y_binned = []
x_binned = []
if force_start is None:
start_bin_edge = np.nanmin(x_val)
else:
start_bin_edge = force_start
if force_end is None:
stop_bin = np.nanmax(x_val)
else:
stop_bin = force_end
last_row = 0
last_row_temp = last_row
while start_bin_edge <= stop_bin:
y_val_list = []
for row_ in range(last_row, M_.shape[0]):
if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
if M_[row_, 1] == M_[row_, 1]:
y_val_list.append(M_[row_, 1])
last_row_temp = row_
if M_[row_, 0] >= start_bin_edge + bin_size:
last_row_temp = row_
break
x_binned.append(start_bin_edge)
if len(y_val_list) >= min_bin_population:
y_binned.append(y_val_list)
else:
y_binned.append([])
start_bin_edge += bin_size
last_row = last_row_temp
if bin_size == 1:
x_binned_arr = np.array(x_binned, dtype=int)
else:
x_binned_arr = np.array(x_binned)
# add series
box_dict = ax.boxplot(y_binned, notch=notch, sym=sym, whis=whis, positions = x_binned_arr,
showmeans = show_means, widths = bin_size * .9)
# axes labels
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if fit_function is not None:
# get mean only list
if fit_fuction_by=='mean':
y_s = []
for y_bin in y_binned:
y_s.append(np.nanmean(y_bin))
elif fit_fuction_by=='median':
y_s = []
for y_bin in y_binned:
y_s.append(np.nanmedian(y_bin))
elif fit_fuction_by=='max':
y_s = []
for y_bin in y_binned:
y_s.append(np.nanmax(y_bin))
elif fit_fuction_by=='min':
y_s = []
for y_bin in y_binned:
y_s.append(np.nanmin(y_bin))
else:
            print('error: fit_fuction_by must be one of mean, median, max, min')
return
        x_, y_ = coincidence(x_binned_arr, np.array(y_s))
# axes labels
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if log_x:
ax.set_xscale("log") # , nonposy='clip')
if log_y:
ax.set_yscale("log") # , nonposy='clip')
params = curve_fit(fit_function, x_, y_)
print('fitted parameters')
print('%0.3f, %0.3f' % (params[0][0], params[0][1]))
# calculate R2
plotting_par_list = [x_]
params_str_ = ''
for i_, fit_par in enumerate(params[0]):
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
residuals = y_ - fit_line_y
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_ - np.mean(y_))**2)
Rsqr = float(1 - (ss_res / ss_tot))
print('R2 = %0.2f' % Rsqr)
fit_line_x = np.arange(0,int(np.max(x_))+1,.1)
plotting_par_list = [fit_line_x]
for fit_par in params[0]:
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
# fit_line_y = (a_ * (fit_line_x ** 3)) + (b_ * (fit_line_x ** 2)) + (c_ * fit_line_x) + d_
ax.plot(fit_line_x,fit_line_y,'k')
# ax.yaxis.set_ticks(np.arange(0, 2800, 200))
for i in range(len(x_)):
print('%0.2f, %0.2f' % (x_[i], y_[i]))
print('-' * 20)
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
plt.show()
medians_ = []
for i_ in box_dict['medians']:
medians_.append(i_.get_ydata()[0])
medians_ = np.array(medians_)
means_ = []
for i_ in box_dict['means']:
means_.append(i_.get_ydata()[0])
means_ = np.array(means_)
return fig, ax, box_dict, x_binned_arr, medians_, means_
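# Example usage (illustrative sketch; lin_ is a hypothetical fit function and
# fit_fuction_by is this module's own keyword name):
#   def lin_(x, a, b):
#       return a * x + b
#   x = np.random.uniform(0, 20, 2000)
#   y = 3 * x + np.random.normal(0, 5, 2000)
#   plot_box_from_values(x, y, x_label='x', y_label='y', bin_size=2,
#                        fit_function=lin_, fit_fuction_by='mean')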
def plot_diurnal_multi(values_array, header_array, x_index, y_index_list,add_line=None, median_=False,
bin_size=1, min_bin_population=10, legend_= True, y_label='',legend_loc=(.70,.80),
custom_y_range_tuple=None, custom_x_range_tuple=None, lw_=2,
return_stats=False, print_stats=False):
color_list = default_cm(np.linspace(0,1,len(y_index_list)))
# stats holder
stats_list_x = []
stats_list_y = []
# start figure
fig, ax = plt.subplots()#figsize=(16, 10))
for c_, parameter_index in enumerate(y_index_list):
color_ = color_list[c_]
x_val_original = values_array[:,x_index]
y_val_original = values_array[:,parameter_index]
# get coincidences only
x_val,y_val = coincidence(x_val_original,y_val_original)
# combine x and y in matrix
M_ = np.column_stack((x_val,y_val))
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(x_val.shape[0]-1):
if x_val[x]==x_val[x] and x_val[x+1]==x_val[x+1]:
if x_val[x+1] < x_val[x]:
always_ascending = 0
if always_ascending == 0:
M_sorted = M_[M_[:,0].argsort()] # sort by first column
M_ = M_sorted
# convert data to list of bins
y_binned = []
x_binned = []
start_bin_edge = np.nanmin(x_val)
last_row = 0
last_row_temp = last_row
while start_bin_edge <= np.nanmax(x_val):
y_val_list = []
for row_ in range(last_row, M_.shape[0]):
if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
if M_[row_, 1] == M_[row_, 1]:
y_val_list.append(M_[row_, 1])
last_row_temp = row_
if M_[row_, 0] >= start_bin_edge + bin_size:
last_row_temp = row_
break
x_binned.append(start_bin_edge)
if len(y_val_list) >= min_bin_population:
y_binned.append(y_val_list)
else:
y_binned.append([])
start_bin_edge += bin_size
last_row = last_row_temp
# if bin_size >= 1:
# x_binned_int = np.array(x_binned, dtype=int)
# else:
# x_binned_int = x_binned
# get mean only list
y_means = []
for y_bin in y_binned:
if median_:
y_means.append(np.median(y_bin))
else:
y_means.append(np.mean(y_bin))
x_,y_= coincidence(np.array(x_binned),np.array(y_means))
# store stats
stats_list_x.append(x_)
stats_list_y.append(y_)
# print x and y
if print_stats:
print(header_array[parameter_index])
for i in range(len(x_)):
print(x_[i],y_[i])
print('-' * 10)
# add means series
ax.plot(x_, y_, color=color_, label=header_array[parameter_index], lw=lw_)
# axes labels
ax.set_xlabel(header_array[x_index])
ax.set_ylabel(y_label)
if legend_: ax.legend(loc=legend_loc)
ax.xaxis.set_ticks(np.arange(0, 24, 3))
#
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
if add_line is not None:
ax.plot(add_line[0], add_line[1], color='black', label=add_line[2], lw=lw_)
#
plt.show()
if return_stats:
return stats_list_x, stats_list_y
def plot_diurnal_multi_wind_direction(header_array, time_array_list, wd_ws_list_list,
bin_size=1, min_bin_population=10, legend_= True, y_label='', x_label='',legend_loc='best',
custom_y_range_tuple=None, custom_x_range_tuple=None, lw_=0, size_=5):
color_list = default_cm(np.linspace(0,1,len(time_array_list)))
# start figure
fig, ax = plt.subplots()#figsize=(16, 10))
for c_ in range(len(time_array_list)):
color_ = color_list[c_]
x_val_original = time_array_list[c_]
wd_val_original = wd_ws_list_list[c_][0]
ws_val_original = wd_ws_list_list[c_][1]
# # get coincidences only
# wd_val,ws_val = coincidence(wd_val_original,ws_val_original)
North_, East_ = polar_to_cart(wd_val_original, ws_val_original)
M_ = np.column_stack((North_,East_))
Index_mean, Values_mean = mean_discrete(x_val_original, M_, bin_size, 0, min_data=min_bin_population)
WD_mean, WS_mean = cart_to_polar(Values_mean[:,0], Values_mean[:,1])
# add means series
ax.scatter(Index_mean, WD_mean, s = size_, c=color_, label=header_array[c_], lw = lw_)
# axes labels
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.yaxis.set_ticks(np.arange(0, 361, 45))
if legend_: ax.legend(loc=legend_loc)
ax.xaxis.set_ticks(np.arange(0, 24, 3))
#
if custom_y_range_tuple is not None: ax.set_ylim(custom_y_range_tuple)
if custom_x_range_tuple is not None: ax.set_xlim(custom_x_range_tuple)
#
plt.show()
def fit_test_1(values_x, values_y, fit_func, x_label=None, y_label=None, bin_size=1,min_bin_population=10):
x_val_original = values_x
y_val_original = values_y
# get coincidences only
x_val,y_val = coincidence(x_val_original,y_val_original)
# start figure
fig, ax = plt.subplots()#figsize=(16, 10))
# combine x and y in matrix
    M_ = np.column_stack((x_val,y_val))
import numpy as np
from nuspacesim import eas_optical
def test_known_values():
    altitude = np.arange(0, 21, dtype=np.float32)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Checking consistency of serialization
#
# This test checks the consistency of the internal states between before/after serialization. We render two images. One with normal configuration and the other with deserialized states.
import lmenv
env = lmenv.load('.lmenv')
import os
import imageio
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmscene
import lightmetrica as lm
os.getpid()
# %load_ext lightmetrica_jupyter
lm.init()
lm.log.init('jupyter')
lm.progress.init('jupyter')
lm.info()
if not lm.Release:
lm.debug.attach_to_debugger()
scene_names = lmscene.scenes_small()
def rmse(img1, img2):
return np.sqrt(np.mean((img1 - img2) ** 2))
rmse_series = pd.Series(index=scene_names)
for scene_name in scene_names:
print("Testing [scene='{}']".format(scene_name))
# Load scene and render
print('w/o serialization')
lm.reset()
lm.load_film('film_output', 'bitmap', {
'w': 1920,
'h': 1080
})
lm.load_accel('accel', 'sahbvh', {})
scene = lm.load_scene('scene', 'default', {
'accel': '$.assets.accel'
})
lmscene.load(scene, env.scene_path, scene_name)
scene.build()
lm.load_renderer('renderer', 'raycast', {
'scene': '$.assets.scene',
'output': '$.assets.film_output',
})
renderer = lm.get_renderer('$.assets.renderer')
renderer.render()
film = lm.get_film('$.assets.film_output')
img_orig = np.copy(film.buffer())
# Visualize
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img_orig,1/2.2),0,1), origin='lower')
plt.show()
# Serialize, reset, deserialize, and render
print('w/ serialization')
lm.save_state_to_file('lm.serialized')
lm.reset()
lm.load_state_from_file('lm.serialized')
renderer = lm.get_renderer('$.assets.renderer')
renderer.render()
film = lm.get_film('$.assets.film_output')
img_serial = np.copy(film.buffer())
# Visualize
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
    ax.imshow(np.clip(np.power(img_serial,1/2.2),0,1), origin='lower')
    plt.show()
    # Compare the two renders via RMSE (rmse_series is defined above)
    rmse_series[scene_name] = rmse(img_orig, img_serial)
"""Functions to visualize the simplex and branch and bound algorithms.
This moodule uses a custom implementation of the resvised simplex method and
the branch and bound algorithm (simplex module) to create and solve LPs. Using
the graphic module (which provides a high-level interface with the plotly
visualization package) and computational geometry functions from the geometry
module, visualizations of these algorithms are then created to be viewed inline
on a Jupyter Notebook or written to a static HTML file.
"""
__author__ = '<NAME>'
__all__ = ['lp_visual', 'simplex_visual', 'bnb_visual']
import itertools
import math
import networkx as nx
import numpy as np
import plotly.graph_objects as plt
from typing import Union, List, Tuple
from ._constants import (AXIS_2D, AXIS_3D, BFS_SCATTER, BNB_NODE,
CANONICAL_TABLE, CONSTRAINT_LINE, CONSTRAINT_POLYGON,
DICTIONARY_TABLE, FIG_HEIGHT, FIG_WIDTH,
ISOPROFIT_IN_POLYGON, ISOPROFIT_LINE, INTEGER_POINT,
ISOPROFIT_OUT_POLYGON, ISOPROFIT_STEPS, LAYOUT,
LEGEND_WIDTH, PRIMARY_COLOR, PRIMARY_DARK_COLOR,
REGION_2D_POLYGON, REGION_3D_POLYGON, SCATTER,
SCATTER_3D, SECONDARY_COLOR, SLIDER, TABLE,
TERTIARY_DARK_COLOR, TERTIARY_LIGHT_COLOR, VECTOR)
from ._geometry import (intersection, interior_point, NoInteriorPoint,
polytope_vertices, polytope_facets)
from ._graphic import (num_format, equation_string, linear_string, plot_tree,
Figure, label, table, vector, scatter, equation,
polygon, polytope)
from .simplex import (LP, simplex, branch_and_bound_iteration,
UnboundedLinearProgram, Infeasible)
class InfiniteFeasibleRegion(Exception):
"""Raised when an LP is found to have an infinite feasible region and can
not be accurately visualized."""
pass
def template_figure(n: int, visual_type: str = 'tableau') -> Figure:
"""Return a figure on which to create a visualization.
The figure can be for a 2 or 3 dimensional linear program and is either of
type tableau (in which the tableau of each simplex iteration is on the
right subplot) or type bnb_tree (in which a branch and bound tree is
visualized shown on the right subplot).
Args:
n (int): Dimension of the LP visualization. Either 2 or 3.
visual_type (str): Type of visualization. Tableau by default.
Returns:
Figure: A figure on which to create a visualization.
Raises:
ValueError: Can only visualize 2 or 3 dimensional LPs.
"""
if n not in [2,3]:
raise ValueError('Can only visualize 2 or 3 dimensional LPs.')
# Subplots: plot on left, table/tree on right
plot_type = {2: 'scatter', 3: 'scene'}[n]
visual_type = {'tableau': 'table', 'bnb_tree': 'scatter'}[visual_type]
fig = Figure(subplots=True, rows=1, cols=2,
horizontal_spacing=(LEGEND_WIDTH / FIG_WIDTH),
specs=[[{"type": plot_type},{"type": visual_type}]])
layout = LAYOUT.copy()
# Set axes
x_domain = [0, (1 - (LEGEND_WIDTH / FIG_WIDTH)) / 2]
y_domain = [0, 1]
x = "x<sub>%d</sub>"
if n == 2:
layout['xaxis1'] = {**AXIS_2D, **dict(domain=x_domain, title=x % (1))}
layout['yaxis1'] = {**AXIS_2D, **dict(domain=y_domain, title=x % (2))}
else:
layout['scene'] = dict(aspectmode='cube',
domain=dict(x=x_domain, y=y_domain),
xaxis={**AXIS_3D, **dict(title=x % (1))},
yaxis={**AXIS_3D, **dict(title=x % (2))},
zaxis={**AXIS_3D, **dict(title=x % (3))})
# Rotate through 6 line colors
colors = ['#173D90', '#1469FE', '#65ADFF', '#474849', '#A90C0C', '#DC0000']
scatter = [plt.Scatter({**SCATTER, **dict(line_color=c)}) for c in colors]
# Annotation templates for branch and bound tree nodes
layout['annotations'] = [
{**BNB_NODE, **dict(name='current', bgcolor='#45568B',
font_color=TERTIARY_LIGHT_COLOR)},
{**BNB_NODE, **dict(name='explored', bgcolor='#D8E4F9')},
{**BNB_NODE, **dict(name='unexplored', bgcolor=TERTIARY_LIGHT_COLOR)}
]
# Conslidate and construct the template
template = plt.layout.Template()
template.layout = layout
template.data.table = [plt.Table(TABLE)]
template.data.scatter = scatter
template.data.scatter3d = [plt.Scatter3d(SCATTER_3D)]
fig.update_layout(template=template)
# Right subplot axes
right_x_axis = dict(domain=[0.5, 1], range=[0,1], visible=False)
right_y_axis = dict(domain=[0.15, 1], range=[0,1], visible=False)
if n == 2:
fig.layout.xaxis2 = right_x_axis
fig.layout.yaxis2 = right_y_axis
else:
fig.layout.xaxis = right_x_axis
fig.layout.yaxis = right_y_axis
return fig
def scale_axes(fig: Figure,
vertices: List[np.ndarray],
scale: float = 1.3):
"""Scale the axes of the figure to fit the given set of vertices.
Args:
fig (Figure): Figure whose axes will get re-scaled.
vertices (List[np.ndarray]): Set of vertices to be contained.
        scale (float): The factor to multiply the minimum axis lengths by.
"""
x_list = [list(x[:,0]) for x in vertices]
limits = [max(i)*scale for i in list(zip(*x_list))]
fig.set_axis_limits(limits)
def bfs_plot(lp: LP,
basic_sol: bool = True,
show_basis: bool = True,
vertices: List[np.ndarray] = None
) -> Union[plt.Scatter, plt.Scatter3d]:
"""Return a scatter trace with hover labels for every basic feasible sol.
Vertices of LP's feasible region can be given to improve computation time.
Args:
lp (LP): LP whose basic feasible solutions will be plotted.
        basic_sol (bool): True if the entire BFS is shown. Defaults to True.
        show_basis (bool): True if the basis is shown within the BFS label.
vertices (List[np.ndarray]): Vertices of the LP's feasible region.
Returns:
Union[plt.Scatter, plt.Scatter3d]: Scatter trace for every BFS.
"""
n,m,A,b,c = lp.get_coefficients(equality=False)
if vertices is None:
vertices = lp.get_vertices()
vertices_arr = np.array([list(v[:,0]) for v in vertices])
bfs = vertices_arr
# Add slack variable values to basic feasible solutions
for i in range(m):
x_i = -np.matmul(vertices_arr,np.array([A[i]]).transpose()) + b[i]
        bfs = np.hstack((bfs,x_i))
import numpy as np
import numpy.testing as npt
from stumpy import scraamp, aamp, config
from stumpy.scraamp import prescraamp
import pytest
import naive
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
window_size = [8, 16, 32]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
percentages = [(0.01, 0.1, 1.0)]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_self_join(T_A, T_B):
for p in [1.0, 2.0, 3.0]:
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_B, s=s, exclusion_zone=zone, p=p)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, s=s, p=p)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_A_B_join(T_A, T_B):
for p in [1.0, 2.0, 3.0]:
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_A, m, T_B, s=s, p=p)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_A, m, T_B=T_B, s=s, p=p)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_A_B_join_swap(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_A, s=s)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, T_B=T_A, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_prescraamp_self_join_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_B, s=s, exclusion_zone=zone)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, s=s)
npt.assert_almost_equal(ref_P, comp_P)
            npt.assert_almost_equal(ref_I, comp_I)
# Mathematical
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
import cv2
# Pytorch
import torch
from torch.utils import data
from torchvision import datasets
# Misc
from functools import lru_cache
import os
import sys
sys.path.insert(0, '/home/bo/research/realtime-action-detection')
from utils.augmentations import SSDAugmentation
import collections
def genuv(h, w):
u, v = np.meshgrid(np.arange(w), np.arange(h))
u = (u + 0.5) * 2 * np.pi / w - np.pi
v = (v + 0.5) * np.pi / h - np.pi / 2
return np.stack([u, v], axis=-1)
def uv2xyz(uv):
sin_u = np.sin(uv[..., 0])
cos_u = np.cos(uv[..., 0])
sin_v = np.sin(uv[..., 1])
cos_v = np.cos(uv[..., 1])
return np.stack([
cos_v * cos_u,
cos_v * sin_u,
sin_v
], axis=-1)
def xyz2uv(xyz):
c = np.sqrt((xyz[..., :2] ** 2).sum(-1))
u = np.arctan2(xyz[..., 1], xyz[..., 0])
v = np.arctan2(xyz[..., 2], c)
return np.stack([u, v], axis=-1)
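# Round-trip sanity check (illustrative, not part of the original file):
# uv2xyz maps (u, v) onto the unit sphere and xyz2uv inverts it, so the
# composition should reproduce the input grid up to floating-point error.
#   uv = genuv(64, 128)
#   assert np.allclose(uv, xyz2uv(uv2xyz(uv.astype(np.float64))))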
def get_rotated_mat(outshape,inshape,rot_x,rot_y,rot_z,fov):
uv = genuv(*outshape)
xyz = uv2xyz(uv.astype(np.float64))
# rotate along x-axis
xyz_rot = xyz.copy()
xyz_rot[..., 0] = xyz[..., 0]
xyz_rot[..., 1] = np.cos(rot_x) * xyz[..., 1] - np.sin(rot_x) * xyz[..., 2]
xyz_rot[..., 2] = np.sin(rot_x) * xyz[..., 1] + np.cos(rot_x) * xyz[..., 2]
# rotate along y-axis
xyz = xyz_rot.copy()
xyz_rot = xyz.copy()
    # sign convention assumed: standard right-handed rotation about the y-axis
    xyz_rot[..., 0] = np.cos(rot_y) * xyz[..., 0] + np.sin(rot_y) * xyz[..., 2]
    xyz_rot[..., 1] = xyz[..., 1]
    xyz_rot[..., 2] = -np.sin(rot_y) * xyz[..., 0] + np.cos(rot_y) * xyz[..., 2]
import numpy as np
import tensorflow as tf
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)
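# Quick shape/range check (illustrative, not from the original file): for a
# sequence of 50 positions and d_model=128 the encoding has shape (1, 50, 128)
# and all values lie in [-1, 1], since they are sines and cosines.
#   pe = positional_encoding(position=50, d_model=128)
#   assert pe.shape == (1, 50, 128)
#   assert np.all(np.abs(pe.numpy()) <= 1.0)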
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from hypothesis import assume, given
from hypothesis.extra.numpy import from_dtype
from hypothesis.strategies import booleans, floats, integers, just, lists, sampled_from
from hypothesis_gufunc.gufunc import gufunc_args as gufunc
from scipy.interpolate import interp1d
from sklearn.preprocessing import LabelBinarizer
import bayesmark.space as sp
from bayesmark.np_util import linear_rescale
from bayesmark.space import CAT_DTYPE, CAT_KIND, CAT_NATIVE_DTYPE
from hypothesis_util import broadcast_tester, close_enough, gufunc_floats
from util import space_configs
INT_MIN = np.iinfo("i").min
INT_MAX = np.iinfo("i").max
WARPS = ("logit", "linear", "bilog", "log")
ENCODER_DTYPES = ("bool", "int", "float")
def encoder_gen(args):
X, labels, assume_sorted, dtype, assume_valid = args
if assume_sorted:
labels = np.sort(labels)
X = labels[X % len(labels)]
dtype = dtype.item() # np.array does not like np.array(dtype)
return X, labels, assume_sorted, dtype, assume_valid
def decoder_gen(args):
Y, labels, assume_sorted, dtype, assume_valid = args
if assume_sorted:
labels = np.sort(labels)
dtype = dtype.item()
return Y, labels, assume_sorted, dtype, assume_valid
def decoder_gen_broadcast(args):
Y, labels, assume_sorted = args
if assume_sorted:
labels = np.sort(labels)
return Y, labels, assume_sorted
@given(
gufunc(
"(),(n),(),(),()->(n)",
dtype=[np.int_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[
integers(0, INT_MAX),
from_dtype(np.dtype(CAT_DTYPE)),
booleans(),
sampled_from(ENCODER_DTYPES),
booleans(),
],
unique=[False, True, False, False, False],
min_side={"n": 1},
).map(encoder_gen)
)
def test_encode_decode(args):
X, labels, assume_sorted, dtype, assume_valid = args
Y = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
if assume_sorted: # otherwise labels will be re-arranged
(idx,), = np.where(Y > 0)
assert np.asarray(labels[idx]) == X
assert Y.dtype == dtype
X2 = sp.decode(Y, labels, assume_sorted=assume_sorted)
assert close_enough(X, X2)
@given(
gufunc(
"(m),(n),(),(),()->(n)",
dtype=[np.int_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[
integers(0, INT_MAX),
from_dtype(np.dtype(CAT_DTYPE)),
booleans(),
sampled_from(ENCODER_DTYPES),
booleans(),
],
unique=[False, True, False, False, False],
min_side={"m": 1, "n": 3},
).map(encoder_gen)
)
def test_encoder_to_sklearn(args):
# sklearn cannot handle this correctly unless n >= 3
X, labels, assume_sorted, dtype, assume_valid = args
Y = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
enc = LabelBinarizer()
enc.fit(labels)
Y2 = enc.transform(X)
assert close_enough(Y, Y2.astype(dtype))
@given(
gufunc(
"(m,n),(n),(),(),()->(n)",
dtype=[np.float_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[floats(), from_dtype(np.dtype(CAT_DTYPE)), booleans(), sampled_from(ENCODER_DTYPES), booleans()],
unique=[False, True, False, False, False],
min_side={"n": 1},
).map(decoder_gen)
)
def test_decode_encode(args):
Y, labels, assume_sorted, dtype, assume_valid = args
assert Y.ndim >= 1 and Y.shape[-1] == len(labels)
X = sp.decode(Y, labels, assume_sorted=assume_sorted)
Y2 = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
# The encoding is defined as the argmax
assert np.all(Y.argmax(axis=1) == Y2.argmax(axis=1))
assert np.all(np.sum(Y2 != 0, axis=1) == 1)
assert np.all(np.sum(Y2 == 1, axis=1) == 1)
@given(
gufunc(
"(m,n),(n),(),(),()->(n)",
dtype=[np.float_, CAT_DTYPE, np.bool_, str, np.bool_],
        elements=[floats(), from_dtype(np.dtype(CAT_DTYPE)), booleans(), sampled_from(ENCODER_DTYPES), booleans()],
import scipy.io.wavfile as sio
import scipy.signal as sis
from scipy import interpolate
import numpy as np
import math
import matplotlib.pyplot as plt
import mylib as myl
import sys
import copy as cp
import re
import scipy.fftpack as sf
# NOTE: int2float might be removed after scipy update/check
# (check defaults in myl.sig_preproc)
# read wav file
# IN:
# fileName
# OUT:
# signal ndarray
# sampleRate
def wavread(f,opt={'do_preproc':True}):
## signal input
fs, s_in = sio.read(f)
# int -> float
s = myl.wav_int2float(s_in)
# preproc
if opt['do_preproc']:
s = myl.sig_preproc(s)
return s, fs
# DCT
# IN:
# y - 1D signal vector
# opt
# ['fs'] - sample rate
# ['wintyp'] - <'kaiser'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <1> additionally needed window parameters,
# scalar, string, list ..., depends on 'wintyp'
# ['nsm'] - <3> number of spectral moments
# ['rmo'] - skip first (lowest) cosine (=constant offset)
# in spectral moment calculation <1>|0
# ['lb'] - lower cutoff frequency for coef truncation <0>
# ['ub'] - upper cutoff frequency (if 0, no cutoff) <0>
# Recommended e.g. for f0 DCT, so that only influence
# of events with <= 10Hz on f0 contour is considered)
#   ['peak_prct'] - <80> lower percentile threshold to be superseded for
# amplitude maxima in DCT spectrum
# OUT:
# dct
# ['c_orig'] all coefs
# ['f_orig'] their frequencies
# ['c'] coefs with freq between lb and ub
# ['f'] their freqs
# ['i'] their indices in c_orig
# ['sm'] spectral moments based on c
# ['opt'] input options
# ['m'] y mean
# ['sd'] y standard dev
# ['cbin'] array of sum(abs(coef)) in frequency bins
# ['fbin'] corresponding lower boundary freqs
# ['f_max'] frequency of global amplitude maximum
# ['f_lmax'] frequencies of local maxima (array of minlen 1)
# ['c_cog'] the coef amplitude of the cog freq (sm[0])
# PROBLEMS:
# - if segment is too short (< 5 samples) lowest freqs associated to
# DCT components are too high for ub, that is dct_trunc() returns
# empty array.
# -> np.nan assigned to respective variables
def dct_wrapper(y,opt):
dflt={'wintyp':'kaiser','winparam':1,'nsm':3,'rmo':True,
'lb':0,'ub':0,'peak_prct':80}
opt = myl.opt_default(opt,dflt)
# weight window
w = sig_window(opt['wintyp'],len(y),opt['winparam'])
y = y*w
#print(1,len(y))
# centralize
y = y-np.mean(y)
#print(2,len(y))
# DCT coefs
c = sf.dct(y,norm='ortho')
#print(3,len(c))
# indices (starting with 0)
ly = len(y)
ci = myl.idx_a(ly)
# corresponding cos frequencies
    f = (ci+1) * (opt['fs']/(ly*2))
# band pass truncation of coefs
# indices of coefs with lb <= freq <= ub
i = dct_trunc(f,ci,opt)
#print('f ci i',f,ci,i)
# analysis segment too short -> DCT freqs above ub
if len(i)==0:
sm = myl.ea()
while len(sm) <= opt['nsm']:
sm = np.append(sm,np.nan)
return {'c_orig':c,'f_orig':f,'c':myl.ea(),'f':myl.ea(),'i':[],'sm':sm,'opt':opt,
'm':np.nan,'sd':np.nan,'cbin':myl.ea(),'fbin':myl.ea(),
'f_max':np.nan, 'f_lmax':myl.ea(), 'c_cog': np.nan}
# mean abs error from band-limited IDCT
#mae = dct_mae(c,i,y)
# remove constant offset with index 0
# already removed by dct_trunc in case lb>0. Thus checked for i[0]==0
# (i[0] indeed represents constant offset; tested by
# cr = np.zeros(ly); cr[0]=c[0]; yr = sf.idct(cr); print(yr)
if opt['rmo']==True and len(i)>1 and i[0]==0:
j = i[1:len(i)]
else:
j = i
if type(j) is not list: j = [j]
# coefs and their frequencies between lb and ub
# (+ constant offset removed)
fi = f[j]
ci = c[j]
# spectral moments
if len(j)>0:
sm = specmom(ci,fi,opt['nsm'])
else:
sm = np.zeros(opt['nsm'])
# frequency bins
fbin, cbin = dct_fbin(fi,ci,opt)
# frequencies of global and local maxima in DCT spectrum
f_max, f_lmax, px = dct_peak(ci,fi,sm[0],opt)
# return
return {'c_orig':c,'f_orig':f,'c':ci,'f':fi,'i':j,'sm':sm,'opt':opt,
'm':np.mean(y),'sd':np.std(y),'cbin':cbin,'fbin':fbin,
'f_max':f_max, 'f_lmax':f_lmax, 'c_cog': px}
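# Example usage (illustrative sketch): a 3 Hz cosine sampled at 100 Hz should
# concentrate the band-limited DCT spectrum near 3 Hz, so the first spectral
# moment sm[0] (center of gravity) and f_max both land close to 3 (within the
# resolution of the coefficient-frequency grid):
#   t = np.linspace(0, 1, 100, endpoint=False)
#   y = np.cos(2 * np.pi * 3 * t)
#   d = dct_wrapper(y, {'fs': 100, 'lb': 1, 'ub': 10})
#   print(d['sm'][0], d['f_max'])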
# returns local and max peak frequencies
# IN:
# x: array of abs coef amplitudes
# f: corresponding frequencies
# cog: center of gravity
# OUT:
#   f_gm: freq of global maximum
# f_lm: array of freq of local maxima
#   px: threshold to be superseded (derived from prct specs)
def dct_peak(x,f,cog,opt):
x = abs(cp.deepcopy(x))
## global maximum
i = myl.find(x,'is','max')
if len(i)>1:
i=int(np.mean(i))
f_gm = float(f[i])
## local maxima
# threshold to be superseeded
px = dct_px(x,f,cog,opt)
idx = myl.find(x,'>=',px)
# 2d array of neighboring+1 indices
# e.g. [[0,1,2],[5,6],[9,10]]
ii = []
# min freq distance between maxima
fd_min = 1
for i in myl.idx(idx):
if len(ii)==0:
ii.append([idx[i]])
elif idx[i]>ii[-1][-1]+1:
xi = x[ii[-1]]
fi = f[ii[-1]]
j = myl.find(xi,'is','max')
#print('xi',xi,'fi',fi,'f',f[idx[i]])
if len(j)>0 and f[idx[i]]>fi[j[0]]+fd_min:
#print('->1')
ii.append([idx[i]])
else:
#print('->2')
ii[-1].append(idx[i])
#myl.stopgo() #!c
else:
ii[-1].append(idx[i])
# get index of x maximum within each subsegment
# and return corresponding frequencies
f_lm = []
for si in ii:
zi = myl.find(x[si],'is','max')
if len(zi)>1:
zi=int(np.mean(zi))
else:
zi = zi[0]
i = si[zi]
if not np.isnan(i):
f_lm.append(f[i])
#print('px',px)
#print('i',ii)
#print('x',x)
#print('f',f)
#print('m',f_gm,f_lm)
#myl.stopgo()
return f_gm, f_lm, px
# return center-of-gravity related amplitude
# IN:
# x: array of coefs
# f: corresponding freqs
# cog: center of gravity freq
# opt
# OUT:
# coef amplitude related to cog
def dct_px(x,f,cog,opt):
x = abs(cp.deepcopy(x))
# cog outside freq range
if cog <= f[0]:
return x[0]
elif cog >= f[-1]:
return x[-1]
# find f-indices adjacent to cog
for i in range(len(f)-1):
if f[i] == cog:
return x[i]
elif f[i+1] == cog:
return x[i+1]
elif f[i] < cog and f[i+1] > cog:
# interpolate
#xi = np.interp(cog,f[i:i+2],x[i:i+2])
#print('cog:',cog,'xi',f[i:i+2],x[i:i+2],'->',xi)
return np.interp(cog,f[i:i+2],x[i:i+2])
return np.percentile(x,opt['peak_prct'])
# pre-emphasis
# alpha > 1 (interpreted as lower cutoff freq)
# alpha <- exp(-2 pi alpha delta)
# s'[n] = s[n]-alpha*s[n-1]
# IN:
# signal
# alpha - s[n-1] weight <0.95>
# fs - sample rate <-1>
# do_scale - <FALSE> if TRUE than the pre-emphasized signal is scaled to
# same abs_mean value as original signal (in general pre-emphasis
# leads to overall energy loss)
def pre_emphasis(y,a=0.95,fs=-1,do_scale=False):
# determining alpha directly or from cutoff freq
if a>1:
if fs <= 0:
            print('pre_emphasis: alpha cannot be derived from cutoff freq without fs. Set to 0.95')
a = 0.95
else:
a = math.exp(-2*math.pi*a*1/fs)
#print('alpha',a)
# shifted signal
ype = np.append(y[0], y[1:] - a * y[:-1])
# scaling
if do_scale:
sf = np.mean(abs(y))/np.mean(abs(ype))
ype*=sf
## plot
#ys = y[30000:40000]
#ypes = ype[30000:40000]
#t = np.linspace(0,len(ys),len(ys))
#fig, spl = plt.subplots(2,1,squeeze=False)
#cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
#cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#spl[0,0].plot(t,ys)
#spl[1,0].plot(t,ypes)
#plt.show()
##
return ype
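# Example usage (illustrative sketch): with a > 1 the argument is treated as a
# lower cutoff frequency, so alpha = exp(-2*pi*50/16000) ~ 0.98 here.
#   y = np.random.normal(0, 1, 16000)
#   ype = pre_emphasis(y, a=50, fs=16000, do_scale=True)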
# frequency bins: symmetric 2-Hz windows around freq integers
# in bandpass overlapped by 1 Hz
# IN:
# f - ndarray frequencies
# c - ndarray coefs
# opt['lb'] - lower and upper truncation freqs
# ['ub']
# OUT:
# fbin - ndarray, lower bnd of freq bins
# cbin - ndarray, summed abs coef values in these bins
def dct_fbin(f,c,opt):
fb = myl.idx_seg(math.floor(opt['lb']),math.ceil(opt['ub']))
cbin = np.zeros(len(fb)-1);
for j in myl.idx_a(len(fb)-1):
k = myl.intersect(myl.find(f,'>=',fb[j]),
myl.find(f,'<=',fb[j+1]))
cbin[j] = sum(abs(c[k]))
fbin = fb[myl.idx_a(len(fb)-1)]
return fbin, cbin
# spectral moments
# IN:
# c - ndarray, coefficients
# f - ndarray, related frequencies <1:len(c)>
# n - number of spectral moments <3>
# OUT:
# m - ndarray moments (increasing)
def specmom(c,f=[],n=3):
if len(f)==0:
f = myl.idx_a(len(c))+1
c = abs(c)
s = sum(c)
k=0;
m = np.asarray([])
for i in myl.idx_seg(1,n):
m = myl.push(m, sum(c*((f-k)**i))/s)
k = m[-1]
return m
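# Worked example (illustrative): for two equal-amplitude components at 2 Hz
# and 4 Hz, the center of gravity is (1*2+1*4)/2 = 3 Hz and the second moment
# (spread around 3 Hz) is ((2-3)**2 + (4-3)**2)/2 = 1:
#   specmom(np.array([1.0, 1.0]), np.array([2.0, 4.0]), n=2)  # -> [3., 1.]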
# wrapper around IDCT
# IN:
# c - coef vector derived by dct
# i - indices of coefs to be taken for IDCT; if empty (default),
# all coefs taken)
# OUT:
# y - IDCT result
def idct_bp(c,i=myl.ea()):
if len(i)==0:
return sf.idct(c,norm='ortho')
cr = np.zeros(len(c))
cr[i]=c[i]
return sf.idct(cr)
# mean abs error from IDCT
def dct_mae(c,i,y):
cr = np.zeros(len(c))
cr[i]=c[i]
yr = sf.idct(cr)
return myl.mae(yr,y)
# indices to truncate DCT output to freq band
# IN:
# f - ndarray, all frequencies
# ci - all indices of coef ndarray
# opt['lb'] - lower cutoff freq
# ['ub'] - upper cutoff freq
# OUT:
# i - ndarray, indices in F of elements to be kept
def dct_trunc(f,ci,opt):
if opt['lb']>0:
ihp = myl.find(f,'>=',opt['lb'])
else:
ihp = ci
if opt['ub']>0:
ilp = myl.find(f,'<=',opt['ub'])
else:
ilp = ci
return myl.intersect(ihp,ilp)
# wrapper around wavread and energy calculation
# IN:
# f: wavFileName (any number of channels) or array containing
# the signal (any number of channels=columns)
# opt: energy extraction and postprocessing
# .win, .wintyp, .winparam: window parameters
# .sts: stepsize for energy contour
# .do_preproc: centralizing signal
# .do_out: remove outliers
# .do_interp: linear interpolation over silence
# .do_smooth: smoothing (median or savitzky golay)
# .out dict; see pp_outl()
# .smooth dict; see pp_smooth()
# fs: <-1> needed if f is array
# OUT:
# y: time + energy contour 2-dim np.array
# (1st column: time, other columns: energy)
def wrapper_energy(f,opt = {}, fs = -1):
opt = myl.opt_default(opt,{'wintyp':'hamming',
'winparam':'',
'sts':0.01,
'win':0.05,
'do_preproc': True,
'do_out': False,
'do_interp': False,
'do_smooth': False,
'out': {},
'smooth': {}})
opt['out'] = myl.opt_default(opt['out'], {'f': 3,
'm': 'mean'})
opt['smooth'] = myl.opt_default(opt['smooth'],{"mtd": "sgolay",
"win": 7,
"ord": 3})
if type(f) is str:
s, fs = wavread(f,opt)
else:
if fs < 0:
sys.exit("array input requires sample rate fs. Exit.")
s = f
opt['fs']=fs
# convert to 2-dim array; each column represents a channel
if np.ndim(s)==1:
s = np.expand_dims(s, axis=1)
# output (.T-ed later, reserve first list for time)
y = myl.ea()
# over channels
for i in np.arange(0,s.shape[1]):
e = sig_energy(s[:,i],opt)
# setting outlier to 0
if opt['do_out']:
e = pp_outl(e,opt['out'])
# interpolation over 0
if opt['do_interp']:
e = pp_interp(e)
# smoothing
if opt['do_smooth']:
e = pp_smooth(e,opt['smooth'])
# <0 -> 0
e[myl.find(e,'<',0)]=0
y = myl.push(y,e)
# output
if np.ndim(y)==1:
y = np.expand_dims(y, axis=1)
else:
y = y.T
# concat time as 1st column
sts = opt['sts']
t = np.arange(0,sts*y.shape[0],sts)
if len(t) != y.shape[0]:
while len(t) > y.shape[0]:
t = t[0:len(t)-1]
while len(t) < y.shape[0]:
t = np.append(t,t[-1]+sts)
t = np.expand_dims(t, axis=1)
y = np.concatenate((t,y),axis=1)
return y
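# Example usage (illustrative sketch; array input requires the sample rate fs):
#   fs = 16000
#   s = np.random.normal(0, 0.1, fs * 2)  # 2 s of noise
#   e = wrapper_energy(s, {'sts': 0.01, 'win': 0.05, 'do_smooth': True}, fs=fs)
#   # e[:, 0] holds time in seconds, e[:, 1] the RMS energy contour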
### replacing outliers by 0 ###################
def pp_outl(y,opt):
if "m" not in opt:
return y
# ignore zeros
opt['zi'] = True
io = myl.outl_idx(y,opt)
if np.size(io)>0:
y[io] = 0
return y
### interpolation over 0 (+constant extrapolation) #############
def pp_interp(y,opt={}):
xi = myl.find(y,'==',0)
xp = myl.find(y,'>',0)
yp = y[xp]
if "kind" in opt:
        f = interpolate.interp1d(xp, yp, kind=opt["kind"], bounds_error=False,
                                 fill_value=(yp[0], yp[-1]))
yi = f(xi)
else:
yi = np.interp(xi,xp,yp)
y[xi]=yi
return y
#!check
### smoothing ########################################
# remark: savgol_filter() causes warning
# Using a non-tuple sequence for multidimensional indexing is deprecated
# will be out with scipy.signal 1.2.0
# (https://github.com/scipy/scipy/issues/9086)
def pp_smooth(y,opt):
if opt['mtd']=='sgolay':
if len(y) <= opt['win']:
return y
y = sis.savgol_filter(y,opt['win'],opt['ord'])
elif opt['mtd']=='med':
y = sis.medfilt(y,opt['win'])
return y
# calculates energy contour from acoustic signal
# do_preproc per default False. If not yet preprocessed by myl.sig_preproc()
# set to True
# IN:
# x ndarray signal
# opt['fs'] - sample frequency
# ['wintyp'] - <'hamming'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <''> additionally needed window parameters,
# scalar, string, list ...
# ['sts'] - stepsize of moving window
# ['win'] - window length
# OUT:
# y ndarray energy contour
def sig_energy(x,opt):
dflt={'wintyp':'hamming','winparam':'','sts':0.01,'win':0.05}
opt = myl.opt_default(opt,dflt)
# stepsize and winlength in samples
sts = round(opt['sts']*opt['fs'])
win = min([math.floor(len(x)/2),round(opt['win']*opt['fs'])])
# weighting window
w = sig_window(opt['wintyp'],win,opt['winparam'])
# energy values
y = np.asarray([])
for j in myl.idx_a(len(x)-win,sts):
s = x[j:j+len(w)]*w
y = myl.push(y,myl.rmsd(s))
return y
# wrapper around windows
# IN:
# typ: any type supported by scipy.signal.get_window()
# lng: <1> length
# par: <''> additional parameters as string, scalar, list etc
# OUT:
# window array
def sig_window(typ,l=1,par=''):
if typ=='none' or typ=='const':
return np.ones(l)
if ((type(par) is str) and (len(par) == 0)):
return sis.get_window(typ,l)
return sis.get_window((typ,par),l)
# pause detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - idx onset <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <8000> (evtl. lowered by fu_filt())
# ['btype'] - <'band'>|'high'|<'low'>
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length (in sec)
# ['l_ref'] - reference window length (in sec)
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
# ['fbnd'] - True|<False> assume pause at beginning and end of file
# ['n'] - <-1> extract exactly n pauses (if > -1)
# ['min_pau_l'] - min pause length <0.5> sec
# ['min_chunk_l'] - min inter-pausal chunk length <0.2> sec
# ['force_chunk'] - <False>, if True, pause-only is replaced by chunk-only
# ['margin'] - <0> time to reduce pause on both sides (sec; if chunks need init and final silence)
# OUT:
# pau['tp'] 2-dim array of pause [on off] (in sec)
# ['tpi'] 2-dim array of pause [on off] (indices in s = sampleIdx-1 !!)
# ['tc'] 2-dim array of speech chunks [on off] (i.e. non-pause, in sec)
# ['tci'] 2-dim array of speech chunks [on off] (indices)
# ['e_ratio'] - energy ratios corresponding to pauses in ['tp'] (analysisWindow/referenceWindow)
def pau_detector(s,opt={}):
if 'fs' not in opt:
sys.exit('pau_detector: opt does not contain key fs.')
dflt = {'e_rel':0.0767,'l':0.1524,'l_ref':5,'n':-1,'fbnd':False,'ons':0,'force_chunk':False,
'min_pau_l':0.4,'min_chunk_l':0.2,'margin':0,
'flt':{'btype':'low','f':np.asarray([8000]),'ord':5}}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
## removing DC, low-pass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
## pause detection for >=n pauses
t, e_ratio = pau_detector_sub(y,opt)
if len(t)>0:
## extending 1st and last pause to file boundaries
if opt['fbnd']==True:
t[0,0]=0
t[-1,-1]=len(y)-1
## merging pauses across too short chunks
## merging chunks across too small pauses
if (opt['min_pau_l']>0 or opt['min_chunk_l']>0):
t, e_ratio = pau_detector_merge(t,e_ratio,opt)
## too many pauses?
# -> subsequently remove the ones with highest e-ratio
if (opt['n']>0 and len(t)>opt['n']):
t, e_ratio = pau_detector_red(t,e_ratio,opt)
## speech chunks
tc = pau2chunk(t,len(y))
## pause-only -> chunk-only
if (opt['force_chunk']==True and len(tc)==0):
tc = cp.deepcopy(t)
t = np.asarray([])
e_ratio = np.asarray([])
## add onset
t = t+opt['ons']
tc = tc+opt['ons']
## return dict
## incl fields with indices to seconds (index+1=sampleIndex)
pau={'tpi':t, 'tci':tc, 'e_ratio': e_ratio}
pau['tp'] = myl.idx2sec(t,opt['fs'])
pau['tc'] = myl.idx2sec(tc,opt['fs'])
#print(pau)
return pau
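# Example usage (illustrative sketch; 'speech.wav' is a hypothetical file name):
#   s, fs = wavread('speech.wav')
#   pau = pau_detector(s, {'fs': fs, 'min_pau_l': 0.3, 'min_chunk_l': 0.2})
#   print(pau['tp'])   # pause [on off] in seconds
#   print(pau['tc'])   # speech chunk [on off] in seconds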
# merging pauses across too short chunks
# merging chunks across too small pauses
# IN:
# t [[on off]...] of pauses
# e [e_rat ...]
# OUT:
# t [[on off]...] merged
# e [e_rat ...] merged (simply mean of merged segments taken)
def pau_detector_merge(t,e,opt):
## min pause and chunk length in samples
mpl = myl.sec2smp(opt['min_pau_l'],opt['fs'])
mcl = myl.sec2smp(opt['min_chunk_l'],opt['fs'])
## merging chunks across short pauses
tm = np.asarray([])
em = np.asarray([])
for i in myl.idx_a(len(t)):
if ((t[i,1]-t[i,0] >= mpl) or
(opt['fbnd']==True and (i==0 or i==len(t)-1))):
tm = myl.push(tm,t[i,:])
em = myl.push(em,e[i])
# nothing done in previous step?
if len(tm)==0:
tm = cp.deepcopy(t)
em = cp.deepcopy(e)
if len(tm)==0:
return t, e
## merging pauses across short chunks
tn = np.asarray([tm[0,:]])
en = np.asarray([em[0]])
if (tn[0,0]<mcl): tn[0,0]=0
for i in np.arange(1,len(tm),1):
if (tm[i,0] - tn[-1,1] < mcl):
tn[-1,1] = tm[i,1]
en[-1] = np.mean([en[-1],em[i]])
else:
tn = myl.push(tn,tm[i,:])
en = myl.push(en,em[i])
#print("t:\n", t, "\ntm:\n", tm, "\ntn:\n", tn) #!v
return tn, en
# pause to chunk intervals
# IN:
# t [[on off]] of pause segments (indices in signal)
# l length of signal vector
# OUT:
# tc [[on off]] of speech chunks
def pau2chunk(t,l):
if len(t)==0:
return np.asarray([[0,l-1]])
if t[0,0]>0:
tc = np.asarray([[0,t[0,0]-1]])
else:
tc = np.asarray([])
for i in np.arange(0,len(t)-1,1):
if t[i,1] < t[i+1,0]-1:
tc = myl.push(tc,[t[i,1]+1,t[i+1,0]-1])
if t[-1,1]<l-1:
tc = myl.push(tc,[t[-1,1]+1,l-1])
return tc
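# Worked example (illustrative): for a 100-sample signal with pauses at
# [[10,19],[50,59]], the complementary speech chunks are
# [[0,9],[20,49],[60,99]]:
#   pau2chunk(np.array([[10,19],[50,59]]), 100)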
# called by pau_detector
# IN:
# as for pau_detector
# OUT:
# t [on off]
# e_ratio
def pau_detector_sub(y,opt):
## settings
# reference window span
rl = math.floor(opt['l_ref']*opt['fs'])
# signal length
ls = len(y)
# min pause length
ml = opt['l']*opt['fs']
# global rmse and pause threshold
e_rel = cp.deepcopy(opt['e_rel'])
# global rmse
# as fallback in case reference window is likely to be pause
# almost-zeros excluded (cf percentile) since otherwise pauses
# show a too high influence, i.e. lower the reference too much
# so that too few pauses detected
#e_glob = myl.rmsd(y)
ya = abs(y)
qq = np.percentile(ya,[50])
e_glob = myl.rmsd(ya[ya>qq[0]])
t_glob = opt['e_rel']*e_glob
# stepsize
sts=max([1,math.floor(0.05*opt['fs'])])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rl,'rng':[0,ls]}
# loop until opt.n criterion is fulfilled
# increasing energy threshold up to 1
while e_rel < 1:
# pause [on off], pause index
t=np.asarray([])
j=0
# [e_y/e_rw] indices as in t
e_ratio=np.asarray([])
i_steps = np.arange(1,ls,sts)
for i in i_steps:
# window
yi = myl.windowing_idx(i,wopt_en)
e_y = myl.rmsd(y[yi])
# energy in reference window
e_r = myl.rmsd(y[myl.windowing_idx(i,wopt_ref)])
# take overall energy as reference if reference window is pause
if (e_r <= t_glob):
e_r = e_glob
# if rmse in window below threshold
if e_y <= e_r*e_rel:
yis = yi[0]
yie = yi[-1]
if len(t)-1==j:
# values belong to already detected pause
if len(t)>0 and yis<t[j,1]:
t[j,1]=yie
# possibly needed later to throw away superfluous
# pauses with high e_ratio
e_ratio[j]=np.mean([e_ratio[j],e_y/e_r])
else:
t = myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
j=j+1
else:
t=myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
# (more than) enough pauses detected?
if len(t) >= opt['n']: break
e_rel = e_rel+0.1
if opt['margin']==0 or len(t)==0:
return t, e_ratio
# shorten pauses by margins
mar=int(opt['margin']*opt['fs'])
tm, erm = myl.ea(), myl.ea()
for i in myl.idx_a(len(t)):
# only slim non-init and -fin pauses
if i>0:
ts = t[i,0]+mar
else:
ts = t[i,0]
if i < len(t)-1:
te = t[i,1]-mar
else:
te = t[i,1]
# pause disappeared
if te <= ts:
# ... but needs to be kept
if opt['n']>0:
tm = myl.push(tm,[t[i,0],t[i,1]])
erm = myl.push(erm,e_ratio[i])
continue
# pause still there
tm = myl.push(tm,[ts,te])
erm = myl.push(erm,e_ratio[i])
return tm, erm
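# Minimal numpy sketch of the windowed-RMS idea that pau_detector_sub builds
# on (synthetic signal; window size and threshold are illustrative only):
if __name__ == "__main__":
    sig = np.concatenate([np.random.randn(200), np.zeros(100), np.random.randn(200)])
    win, hop = 50, 10
    rms = np.array([np.sqrt(np.mean(sig[i:i + win] ** 2))
                    for i in range(0, len(sig) - win, hop)])
    is_pause = rms < 0.1 * np.max(rms)  # frames well below the peak energy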
def pau_detector_red(t,e_ratio,opt):
# keep boundary pauses
if opt['fbnd']==True:
n=opt['n']-2
#bp = [t[0,],t[-1,]]
bp = np.concatenate((np.array([t[0,]]),np.array([t[-1,]])),axis=0)
ii = np.arange(1,len(t)-1,1)
t = t[ii,]
e_ratio=e_ratio[ii]
else:
n=opt['n']
bp=np.asarray([])
if n==0:
t=[]
# remove pause with highest e_ratio
while len(t)>n:
i = myl.find(e_ratio,'is','max')
j = myl.find(np.arange(0,len(e_ratio),1),'!=',i[0])
t = t[j,]
e_ratio = e_ratio[j]
# re-add boundary pauses if removed
if opt['fbnd']==True:
if len(t)==0:
t=np.concatenate((np.array([bp[0,]]),np.array([bp[1,]])),axis=0)
else:
t=np.concatenate((np.array([bp[0,]]),np.array([t]),np.array([bp[1,]])),axis=0)
return t, e_ratio
# spectral balance calculation according to Fant 2000
# IN:
# sig: signal (vowel segment)
# fs: sampe rate
# opt:
# 'win': length of central window in ms <len(sig)>; -1 is same as len(sig)
# 'ub': upper freq boundary in Hz <-1> default: no low-pass filtering
# 'domain': <'freq'>|'time'; pre-emp in frequency (Fant) or time domain
# 'alpha': <0.95> for time domain only y[n] = x[n]-alpha*x[n-1]
# if alpha>0 it is interpreted as lower freq threshold for pre-emp
# OUT:
# sb: spectral tilt
def splh_spl(sig,fs,opt_in={}):
opt = cp.deepcopy(opt_in)
opt = myl.opt_default(opt,{'win':len(sig),'f':-1,'btype':'none',
'domain':'freq','alpha':0.95})
#print(opt)
#myl.stopgo()
## cut out center window ##################################
ls = len(sig)
if opt['win'] <= 0:
opt['win'] = ls
if opt['win'] < ls:
wi = myl.windowing_idx(int(ls/2),
{'rng':[0, ls],
'win':int(opt['win']*fs)})
y = sig[wi]
else:
y = cp.deepcopy(sig)
if len(y)==0:
return np.nan
# reference sound pressure level
p_ref = pRef('spl')
## pre-emp in time domain ####################################
if opt['domain']=='time':
# low pass filtering
if opt['btype'] != 'none':
flt = fu_filt(y,{'fs':fs,'f':opt['f'],'ord':6,
'btype':opt['btype']})
y = flt['y']
yp = pre_emphasis(y,opt['alpha'],fs,False)
y_db = 20*np.log10(myl.rmsd(y)/p_ref)
yp_db = 20*np.log10(myl.rmsd(yp)/p_ref)
#print(yp_db - y_db)
return yp_db - y_db
## pre-emp in frequency domain ##############################
# according to Fant
# actual length of cut signal
n = len(y)
## hamming windowing
y *= np.hamming(n)
## spectrum
Y = np.fft.fft(y,n)
N = int(len(Y)/2)
## frequency components
XN = np.fft.fftfreq(n,d=1/fs)
X = XN[0:N]
# same as X = np.linspace(0, fs/2, N, endpoint=True)
## amplitudes
# sqrt(Y.real**2 + Y.imag**2)
# to be normalized:
# *2 since only half of transform is used
# /N since output needs to be normalized by number of samples
# (tested on sinus, cf
# http://www.cbcity.de/die-fft-mit-python-einfach-erklaert)
a = 2*np.abs(Y[:N])/N
## vowel-relevant upper frequency boundary
if opt['btype'] != 'none':
vi = fu_filt_freq(X,opt)
if len(vi)>0:
X = X[vi]
a = a[vi]
## Fant preemphasis filter (Fant et al 2000, p10f eq 20)
preemp = 10*np.log10((1+X**2/200**2)/(1+X**2/5000**2))
ap = 10*np.log10(a)
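# Standalone check of the Fant pre-emphasis curve used above
# (illustrative frequency grid; eq. 20 in Fant et al. 2000):
if __name__ == "__main__":
    X_demo = np.linspace(1, 8000, 100)
    pre_demo = 10*np.log10((1+X_demo**2/200**2)/(1+X_demo**2/5000**2))
    # pre_demo rises from ~0 dB at low frequencies towards
    # 10*log10((5000/200)**2) ~ 28 dB at high frequencies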
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt #graphics library
import numpy as np #numerical library (FFT etc)
import Nio # NCAR python io library
# set up the physical domain of the experiment to be run
# zs = topography (Fzs = FFT(zs))
# dx = grid cell size (m)
# U = wind speed (m/s)
# Hw = water vapor scale height
# Ndsq = Brunt Vaisalla freq. squared (s^-2)
# also sets up parameters for the linear model (e.g. tauf,c, cw)
# tauf = average hydrometeor fall time (seconds) ~500s? for rain w/Hw=5000, 1500s? for snow w/Hw=3000
# tauc = average hydrometeor formation time (seconds) ~500s for rain, longer for snow, faster for warmer
def setup_experiment(wind=2, experiment=1, verbose=False):
U = [5.0,10.0,15.0,25.0][wind] # wind speed
Ndsq = 3.6e-5; # dry BV freq sq. #original
# Ndsq = 0.002**2 #4e-6 # dry BV freq sq.
# Ndsq = 0.00011 #1e-4 # dry BV freq sq.
# experiment D1 D2 D3
# h height of the hill 1800 1400 1040 [meters]
# sigma half-width 60 40 3.1 [grid cells]
# z0 base of the hill 1700 2000 2200 [meters]
# G number of grids 420 250 52 [grid cells]
Nx = int([420.,250.,52.][experiment]*2) # length of domain (grid cells)
hm = [1800.0,1400.0,1040.0][experiment] # mnt height (m)
xm = Nx/2.0 # mountain location in domain (grid cell)
am = [60.0,40.0,3.1][experiment] # mountain half-width (grid cells)
dx = 2000.0 # grid spacing (m)
Lx = Nx*dx # length of domain (m)
x = np.linspace(0,Lx,Nx) # distance array (m)
zo = [1700.0,2000.0,2200.0][experiment] # mountain base height (m) NOT REALLY USED CORRECTLY YET
p0 = 101325 * (1 - 2.25577e-5*zo)**5.25588 # compute base pressure
T2m = 268.0 # 270.56 #needs to be selected by experiment?
base_mr=[0.00325,0.0032,0.003][experiment] # mixing ratio at the base of the mountain
# base_mr = 0.003255
# base_mr = 0.0025687
# hw = 4000.0 # scale of water vapor (see formula in SB04,appendix)
# hw = hw - zo
# hw = 3000.0
# zo=0.0
# ----------------------------------------------------------------
# Make the mountain (theoretical)
#
# zs = hm*exp(-(x-xm).^2/am^2); Gaussian
zs = hm/(1.0+((x/dx-xm)/am)**2.) # eqn from Trude
zs = zs-zs[Nx//4] # sets the zero point to be 1/4 of the way in because we have doubled the size of the domain
zs[zs<0]=0 # set anything below 0 to 0
zs += zo
# -----------------------------------------------------------------
# put zs in Fourier space
Fzs = np.fft.fftshift(np.fft.fft(zs))/Nx
# linear model paramters (see calculations in SB'04):
# -------------------------------------------------------------------------------
t0 = 273.16
# p0 = 100000
# p0 = 82000.0 (now calculated above from z0)
L = 2.5e6
ratio = 18.015/28.964
R = 287.0
Rv = 461.0
cp = 1004.0
g = 9.81
es = 611.21*np.exp(17.502*(T2m-t0)/(T2m-32.19));
qs0 = ratio *es/(p0-es);
cap_gamma = -(g * (1.+(L*qs0)/(R*T2m)) / (cp + (L**2*qs0*ratio) / (R*T2m**2)));
env_gamma = Ndsq*T2m/g + cap_gamma #Ndsq calculated from potential temperature profile cap_gamma converts to real temp?
hw = np.abs((Rv*T2m**2)/(L*env_gamma))
# if env_gamma pulled from model, enforce reasonable values with Ndsq=min(Ndsq,0.012)
# cw below calculated from Trude's profile, Ndsq=0.00011, T2m=271K, p0=820mb, dth/dz=0.004K/m dt/dz=-0.0054
# could calculate Ndsq as = (env_gamma-cap_gamma)*g/T2m
Ndsq = (-0.0054 - cap_gamma) * g / T2m
# -------------------------------------------------------------------------------
# cw = 1.9 # sensitivity (commonly set to 1.1, see paper SB04) = cap_gamma / env_gamma
cw = cap_gamma / env_gamma
# using base_mr from profile, but maybe it should be qs0 above?
cwqv = cw * base_mr # sensitivity times q_vs (set to: 0.01 kg/kg, see paper SB04)
z0 = 0 # at the height (AGL?) where you want the precip
vterm = 2.0 # vertical terminal velocity for e.g. snow = 2m/s rain=10m/s
tauf= hw / vterm # BS'11: =zg/Vt ( =hw/Vt for moisture level: around 500s (->750s) is normally good)
tauc= 2000.0 # cloud->hydrometeor conversion time. probably around 500s for rain,
# shorter for warmer condition, longer for snow?
if verbose:
print(" Ndsq=",Ndsq)
print(" Environmental lapse rate=0.004K/m")
print(" \"Dry\" lapse rate=0.0017K/m")
print(" Base MR=",base_mr)
print(" Scale height=",hw)
print(" tauc=",tauc)
print(" tauf=",tauf)
print(" cwqv=",cwqv)
# ---------------------------------------------------------------------------------
params = {"cw":cw,"cwqv":cwqv,"z0":z0,"tauf":tauf,"tauc":tauc,"hw":hw,"Ndsq":Ndsq}
return (x,zs,Fzs,U,dx,params)
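# Example usage (illustrative settings; needs only numpy from the imports above):
if __name__ == "__main__":
    x, zs, Fzs, U, dx, params = setup_experiment(wind=1, experiment=0, verbose=True)
    print("domain length [m]:", x[-1], "peak height [m]:", zs.max())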
def get_params(T2m,U,Ndsq,zs,env_gamma,verbose=False):
"""docstring for get_params"""
Nx = len(zs) # length of domain (grid cells)
# hm = [1800.0,1400.0,1040.0][experiment] # mnt height (m)
# xm = Nx/2.0 # mountain location in domain (grid cell)
# am = [60.0,40.0,3.1][experiment] # mountain half-width (grid cells)
# dx = 2000.0 # grid spacing (m)
# Lx = Nx*dx # length of domain (m)
# x = np.linspace(0,Lx,Nx) # distance array (m)
# zo = [1700.0,2000.0,2200.0][experiment] # mountain base height (m) NOT REALLY USED CORRECTLY YET
zo = 0.0
p0 = 101325 * (1 - 2.25577e-5*zo)**5.25588 # compute base pressure
# p0 = 101325.0
# T2m = 268.0# 270.56 #needs to be selected for each experiment...?
# hw = 4000.0 # scale of water vapor (see formula in SB04,appendix)
# base_mr=[0.00325,0.0032,0.003][experiment] # mixing ratio at the base of the mountain
# base_mr = 0.003255
# base_mr = 0.0025687
# -----------------------------------------------------------------
# put zs in Fourier space
Fzs = np.fft.fftshift(np.fft.fft(zs))/Nx
# linear model paramters (see calculations in SB'04):
# -------------------------------------------------------------------------------
t0 = 273.16
# p0 = 100000
# p0 = 82000.0 (now calculated above from z0)
L = 2.5e6
ratio = 18.015/28.964
R = 287.0
Rv = 461.0
cp = 1004.0
g = 9.81
es = 611.21*np.exp(17.502*(T2m-t0)/(T2m-32.19))
qs0 = ratio *es/(p0-es)
base_mr=qs0
cap_gamma = -(g * (1.+(L*qs0)/(R*T2m)) / (cp + (L**2*qs0*ratio) / (R*T2m**2)))
# env_gamma = Ndsq*T2m/g + cap_gamma #Ndsq calculated from potential temperature profile cap_gamma converts to real temp?
hw = np.abs((Rv*T2m**2)/(L*env_gamma))
# if env_gamma pulled from model, enforce reasonable values with Ndsq=min(Ndsq,0.012)
# cw below calculated from Trude's profile, Ndsq=0.00011, T2m=271K, p0=820mb, dth/dz=0.004K/m dt/dz=-0.0054
# could calculate Ndsq as = (env_gamma-cap_gamma)*g/T2m
# Ndsq=(-0.0054-cap_gamma)*g/T2m
# -------------------------------------------------------------------------------
# cw = 1.9 # sensitivity (commonly set to 1.1, see paper SB04) = cap_gamma / env_gamma
cw = cap_gamma/env_gamma
if verbose:
print(cap_gamma, env_gamma, cw, qs0)
# using base_mr from profile, but maybe it should be qs0 above?
cwqv= cw*base_mr # =sensitivity times q_vs (set to: 0.01 kg/kg, see paper SB04)
# print(cwqv,cw,base_mr,qs0,p0,hw)
z0 = 0 # at the height (AGL?) where you want the precip
vterm = 2.0 # vertical terminal velocity for e.g. snow = 2m/s rain=10m/s
tauf= hw/vterm # BS'11: =zg/Vt ( =hw/Vt for moisture level: around 500s (->750s) is normally good)
tauc= 2000.0 # cloud->hydrometeor conversion time. probably around 500s for rain,
# shorter for warmer condition, longer for snow?
if verbose:
print(" Ndsq=",Ndsq)
print(" Environmental lapse rate=0.004K/m")
print(" \"Dry\" lapse rate=0.0017K/m")
print(" Base MR=",base_mr)
print(" Scale height=",hw)
print(" tauc=",tauc)
print(" tauf=",tauf)
print(" cwqv=",cwqv)
# ---------------------------------------------------------------------------------
params = {"cw":cw,"cwqv":cwqv,"z0":z0,"tauf":tauf,"tauc":tauc,"hw":hw,"Ndsq":Ndsq}
return (Fzs,params)
#
# This is a 2-D precipitation solution for the linear model.
#
def solve(Fzs,U,dx,params, zlevels=None):
cw = params.cw #1.1 #efficiency factor
cwqv= params.cwqv
z0 = params.z0 #0 #meters
tauf= params.tauf #1500 #seconds
tauc= params.tauc #1000 #seconds
hw = params.hw #3000 #m water vapor scale height, maybe this shouldn't be in params...
Ndsq= params.Ndsq #0.0011 # 1/s^2 Brunt-Vaisalla frequency squared
Nx=Fzs.shape[0]
m= np.ones(Nx)
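# The body of solve() is truncated above. For reference, the heart of the
# Smith & Barstad (2004) linear model is a transfer function applied to each
# terrain Fourier coefficient; a single-wavenumber sketch (assumed notation:
# k = horizontal wavenumber, sigma = U*k the intrinsic frequency):
if __name__ == "__main__":
    k = 2*np.pi/50e3                       # one 50 km wavelength component
    U_d, hw_d, Ndsq_d = 10.0, 3000.0, 3.6e-5
    tauc_d, tauf_d, cwqv_d = 2000.0, 1500.0, 0.002
    sigma = U_d*k
    m_d = np.sqrt(max(Ndsq_d/sigma**2 - 1.0, 0.0))*k   # vertical wavenumber
    Phat = cwqv_d*1j*sigma/((1 - 1j*m_d*hw_d)
                            *(1 + 1j*sigma*tauc_d)
                            *(1 + 1j*sigma*tauf_d))
    # multiplying Phat by Fzs at this wavenumber and inverse-transforming gives
    # the precipitation field (negative values are truncated to zero).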
import argparse
# import safety_gym
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import time
from safety_gym.envs.engine import Engine
from PIL import Image
from imageio import imwrite
from envs import GoalEnv
from utils.dataset import *
import os
CAMERA_FREE = 0
CAMERA_FIXED = 1
class Maze(GoalEnv):
def __init__(self, num_steps=20000, step_size=50):
super(Maze, self).__init__()
self.goal_im = None
self.step_size = step_size
self.grid_n = 64
self.maze_env = SafetyGymMaze('point')
self.maze_env.reset()
self.maze_env.render(mode='rgb_array', camera_id=2)
# env.render(camera_id=2)
self.maze_env.viewer_setup()
self.maze_env.set_num_steps(num_steps)
self.state = self.maze_env.robot_pos
self.name = 'maze'
def reset(self):
"""Resets the state of the environment and the goal, returning an initial observation (images)."""
self.maze_env.reset()
self.goal_im = self.get_obs()
self.goal_state = self.maze_env.robot_pos
self.maze_env.reset()
return self.get_obs()
def seed(self, seed):
"""Set the random seed."""
np.random.seed(seed)
def get_state(self):
return self.maze_env.robot_pos
def get_obs(self):
"""Return current image observation."""
img = self.maze_env.render(mode='rgb_array', camera_id=2)
img = np.array(Image.fromarray(img).resize((64, 64), resample=Image.NEAREST))
return img
def step(self, action):
"""Run one timestep of the environment's dynamics.
:param action:
:return:
"""
for i in range(self.step_size):
self.maze_env.step(action[i])
def step_sequence(self, action_seq):
for action in action_seq:
self.step(action)
def sample_action(self):
"""Return a uniformly sampled action from the action space"""
action = []
for i in range(self.step_size):
action.append(self.maze_env.action_space.sample())
return action
def reached_goal(self):
"""Return True if the state of the environment matches the goal state"""
pass
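# Standalone illustration of the observation downsampling done in get_obs
# (synthetic frame; does not require a running safety_gym environment):
if __name__ == "__main__":
    frame = (np.random.rand(480, 480, 3)*255).astype(np.uint8)
    obs = np.array(Image.fromarray(frame).resize((64, 64), resample=Image.NEAREST))
    assert obs.shape == (64, 64, 3)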
class SafetyGymMaze(Engine):
def __init__(self, agent, robot_xy = None):
surrounding_walls = set()
for i in np.linspace(0, 1, 15):
import os
import tarfile
from Bio import SeqIO
import numpy as np
import math
import time
import random
import sys
import logging
#region Logging start
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
handler = logging.FileHandler('results/logger.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
#endregion
AA_list = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
# Custom dictionary, amino acids with similar properties are "close" to one another
AA_to_int = {'X': 0, 'R': 1, 'H': 2, 'K': 3, 'D': 4, 'E': 5, 'S': 6, 'T': 7, 'N': 8, 'Q': 9, 'C': 10,
'G': 11, 'P': 12, 'A': 13, 'V': 14, 'I': 15, 'L': 16, 'M': 17, 'F': 18, 'Y': 19, 'W': 20}
int_to_AA = {x: y for y, x in AA_to_int.items()}
def check_region(region):
if any(x in region for x in ['c','n','h','C']):
return np.array([1])
else:
return np.array([0])
def progress(file_counter, total_file_count, sample_counter):
s = (f"{file_counter} out of {total_file_count} files loaded, "
f"{sample_counter} samples loaded")
print(s, end='\r')
def one_hot_encoding(vector):
    # Keras expects plain nested Python lists here; passing numpy arrays caused
    # hard-to-trace errors, so the encoding is done in place on the list.
for i,j in enumerate(vector):
_hot = [0]*len(AA_to_int.keys())
if j in AA_to_int.keys():
_hot[AA_to_int[j]] = 1. # Add 1. at correct index
else:
_hot[0] = 1. # Add X if unknown AA
vector[i] = _hot
return vector
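# Quick check of the encoder on a toy peptide (uses the AA_to_int map above):
if __name__ == "__main__":
    demo = one_hot_encoding(list('MKV'))
    assert len(demo) == 3 and len(demo[0]) == len(AA_to_int)
    assert demo[0][AA_to_int['M']] == 1.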
def load_training(seq_length, data_folder, data_augmentation=False,
fix_samples='IGNORE', equalize_data=False, save_array=True,
use_ascii=False, vectorize=True):
"""Loads traning data into a numpy array.
Ignores files that starts with . since they are config files in ubuntu.
"""
print('Loading data...')
t = time.time()
cur_dir = os.getcwd() # Needed to reset working directory
os.chdir(data_folder) # Go to data folder
sample_counter = 0 # Just to count amount of data
file_counter = 0
total_file_count = 0 # Count total amount of files
for dirpath, dirnames, files in os.walk(os.getcwd(), topdown=True):
dirnames[:] = [x for x in dirnames if not x.startswith('.')]
total_file_count += len([x for x in files if not x.startswith('.')])
progress(file_counter, total_file_count, sample_counter)
big_seq_list = [] # FASTER
big_label_list = [] # FASTER
for (dirpath, dirnames, filenames) in os.walk(os.getcwd(), topdown=True): # Walks through all files and dirs
dirnames[:] = [x for x in dirnames if not x.startswith('.')]
for filename in filenames:
if filename.startswith('.'): # Ignore config files
continue
records = SeqIO.parse(dirpath + '/' + filename, 'fasta')
for record in records:
record = str(record.seq)
record = record.split('#')
full_seq = list(record[0])
# Discard bad data
if len(full_seq) < 2:
continue
# The first amino acid is usually M or not in signal peptide. Ignore it
full_seq = full_seq[1:]
# seqs = list in list
if not data_augmentation:
seqs = [full_seq[:seq_length]]
elif data_augmentation:
# Divide into smaller pieces
seqs = [full_seq[x:x + seq_length] for x in range(0, len(full_seq), seq_length)]
else:
print('No resample method has been choosen')
quit()
if fix_samples == 'LOOP_SEQ':
seqs = [list(x) + (full_seq*(math.ceil(seq_length/(len(full_seq)))))[:seq_length-len(x)]
if len(x) < seq_length
else x for x in seqs]
elif fix_samples == 'ZERO':
seqs = [list(x) + ['X']*(seq_length-len(x))
if len(x) < seq_length
else x for x in seqs]
elif fix_samples == 'IGNORE':
seqs = [x for x in seqs
if len(x) == seq_length]
if seqs == []: # Check for empty lists
continue
elif fix_samples == 'NOISE':
seqs = [x + random.choices(AA_list, k=(seq_length-len(x)))
if len(x) < seq_length
else x for x in seqs]
# Fix Y
if 'positive' in dirpath:
"""No region, assume the first bases are the signal peptide"""
for i in range(len(seqs)):
if i == 0:
big_label_list.append([1.])
else: # When doing data augmentation, this is needed
big_label_list.append([0.])
elif 'negative' in dirpath:
for i in range(len(seqs)):
big_label_list.append([0.])
else:
# Unknown
big_label_list.append([-1.])
# Fix X
if vectorize:
for i,j in enumerate(seqs):
seqs[i] = one_hot_encoding(j)
elif use_ascii:
# Using ascii numbers, ord('A') = 65
"""Doing this sped up the process by 20 fold!"""
for i,j in enumerate(seqs):
seqs[i] = [float(ord(x)) - 65 for x in j]
elif not use_ascii:
# Using ascii numbers, ord('A') = 65
"""Doing this sped up the process by 20 fold!"""
for i,j in enumerate(seqs):
seqs[i] = [float(AA_to_int[x])
if x in AA_to_int.keys()
else 0 # Fix unknown amino acids
for x in j]
for seq in seqs:
big_seq_list.append(seq) # Needed, since data aug breaks
sample_counter += len(seqs)
# Slows performance, but I still like it here
#progress(file_counter, total_file_count, sample_counter)
file_counter += 1
progress(file_counter, total_file_count, sample_counter)
"""Can be used in future to find which data was tm or not"""
#print(os.path.basename(dirpath))
"""For neg or pos"""
#print(os.path.basename(dirpath))
# Needs to flatten big_seq_list, since it is now a 3 matrix
print('')
logger.info(f'Loaded {sample_counter} samples')
#print('Flattening...')
#big_seq_list = sum(big_seq_list, []) # Flattens list, needed since the code needs list in lists for data aug
print('Converting to numpy array...')
X = np.array(big_seq_list, dtype=np.float32)
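# Standalone illustration of the 'ZERO' fix_samples strategy used above:
# short sequences are right-padded with the unknown residue 'X'.
if __name__ == "__main__":
    seq_length_demo = 8
    seqs_demo = [list('MKV'), list('MKVLAATG')]
    padded = [x + ['X']*(seq_length_demo - len(x)) if len(x) < seq_length_demo
              else x for x in seqs_demo]
    assert padded[0] == ['M', 'K', 'V', 'X', 'X', 'X', 'X', 'X']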
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 15 11:29:39 2022
@author: chloechallamel
Code pour le projet M9
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import dm4bem
# Physical values
# ===============
# P-controler gain
# ----------------
Kp = 1e4 # Kp -> ∞ : almost perfect controller
# Kp = 1e2
# Kp = 1e-3 # Kp -> 0 : no controller
# Air-flow rate
# -------------
Va_livingroom = 6 * 4 * 2.5 # m³ volume of air in the living room
Va_bathroom = 2 * 4 * 2.5 # m³ volume of air in the bathroom
ACH = 1 # air changes per hour
Va_livingroom_dot = ACH * Va_livingroom / 3600 # m³/s air infiltration
Va_bathroom_dot = ACH * Va_bathroom / 3600
# Thermophysical properties
# ------------------------
air = {'Density': 1.2, # kg/m³
'Specific heat': 1000} # J/kg.K
# Valeurs récupérées de DB
materials = {'Conductivity': [2.3, 0.0457, 0.25, 0.19, 1.4], # W/m.K
'Density': [2300, 40, 2800, 700, 2100], # kg/m³
'Specific heat': [1000, 1450, 896, 2390, 840], # J/kg.K
'Width': [0.3, 0.05, 0.013, 0.035, 0.1], # m
'Slice': [1, 1, 1, 1, 1]} # nb of meshes
materials = pd.DataFrame(
materials, index=['Concrete', 'Insulation', 'Plaster', 'Door',
'Floor'])
# Surfaces
surfaces = {'LR': [13.11, 18.4, 6.6, 1.89, 48], # m²
'BR': [15, 3.8, 1.2, 0, 16],
'LR-BR': [8.11, 0, 0, 1.89, 0]}
surfaces = pd.DataFrame(
surfaces, index=['Dividingwall', 'Wall', 'Window', 'Door', 'Floor'])
# convection coefficients, W/m² K
h = pd.DataFrame([{'in': 4., 'out': 10}])
# Thermal circuit
# ===============
# Thermal conductances
# --------------------
# Conduction
# Conduction zone 1 : LR
# in wall
Gcd_LR_w_c = materials['Conductivity']['Concrete'] / \
materials['Width']['Concrete'] * surfaces['LR']['Wall']
Gcd_LR_w_i = materials['Conductivity']['Insulation'] / \
materials['Width']['Insulation'] * surfaces['LR']['Wall']
Gcd_LR_w_p = materials['Conductivity']['Plaster'] / \
materials['Width']['Plaster'] * surfaces['LR']['Wall']
# in dividing wall
Gcd_LR_dw_p = materials['Conductivity']['Plaster'] / \
materials['Width']['Plaster'] * surfaces['LR']['Dividingwall']
Gcd_LR_dw_i = materials['Conductivity']['Insulation'] / \
materials['Width']['Insulation'] * surfaces['LR']['Dividingwall']
# door
Gcd_LR_d = materials['Conductivity']['Door'] / \
materials['Width']['Door'] * surfaces['LR']['Door']
# floor
Gcd_LR_f = materials['Conductivity']['Floor'] / \
materials['Width']['Floor'] * surfaces['LR']['Floor']
# Conduction zone 2 : BR
# in wall
Gcd_BR_w_c = materials['Conductivity']['Concrete'] / \
materials['Width']['Concrete'] * surfaces['BR']['Wall']
Gcd_BR_w_i = materials['Conductivity']['Insulation'] / \
materials['Width']['Insulation'] * surfaces['BR']['Wall']
Gcd_BR_w_p = materials['Conductivity']['Plaster'] / \
materials['Width']['Plaster'] * surfaces['BR']['Wall']
# in dividing wall
Gcd_BR_dw_p = materials['Conductivity']['Plaster'] / \
materials['Width']['Plaster'] * surfaces['BR']['Dividingwall']
Gcd_BR_dw_i = materials['Conductivity']['Insulation'] / \
materials['Width']['Insulation'] * surfaces['BR']['Dividingwall']
# floor
Gcd_BR_f = materials['Conductivity']['Floor'] / \
materials['Width']['Floor'] * surfaces['BR']['Floor']
# Conduction zone 3 : LR-BR
# in dividing wall
Gcd_LRBR_dw_p = materials['Conductivity']['Plaster'] / \
materials['Width']['Plaster'] * surfaces['LR-BR']['Dividingwall']
Gcd_LRBR_dw_i = materials['Conductivity']['Insulation'] / \
materials['Width']['Insulation'] * surfaces['LR-BR']['Dividingwall']
# door
Gcd_LRBR_d = materials['Conductivity']['Door'] / \
materials['Width']['Door'] * surfaces['LR-BR']['Door']
# Convection
# Convection zone 1 : LR
Gcv_LR_w = h * surfaces['LR']['Wall'] # in wall
Gcv_LR_dw = h * surfaces['LR']['Dividingwall'] # in in dividing wall
Gcv_LR_d = h * surfaces['LR']['Door'] # door
Gcv_LR_f = h * surfaces['LR']['Floor'] # floor
# Convection zone 2 : BR
Gcv_BR_w = h * surfaces['BR']['Wall'] # in wall
Gcv_BR_dw = h * surfaces['BR']['Dividingwall'] # in in dividing wall
Gcv_BR_f = h * surfaces['BR']['Floor'] # floor
# Convection zone 3 : LR-BR
Gcv_LRBR_dw = h * surfaces['LR-BR']['Dividingwall'] # in in dividing wall
Gcv_LRBR_d = h * surfaces['LR-BR']['Door'] # door
# Ventilation & advection
Gv_LR = Va_livingroom_dot * air['Density'] * air['Specific heat']
Gv_BR = Va_bathroom_dot * air['Density'] * air['Specific heat']
# Window : conductance Uw * S
Uw = 1.96 # W/(m2.K) pour double-vitrage selon DB
Gw_LR = Uw * surfaces['LR']['Window']
Gw_BR = Uw * surfaces['BR']['Window']
# Renouvellement d'air
Gr_LR = Gr_BR = Kp
# Thermal capacities
# ------------------
# zone 1 : LR
C_LR_w_c = materials['Density']['Concrete'] * \
materials['Specific heat']['Concrete'] * \
surfaces['LR']['Wall'] * materials['Width']['Concrete']
C_LR_w_i = materials['Density']['Insulation'] * \
materials['Specific heat']['Insulation'] * \
surfaces['LR']['Wall'] * materials['Width']['Insulation']
C_LR_w_p = materials['Density']['Plaster'] * \
materials['Specific heat']['Plaster'] * \
surfaces['LR']['Wall'] * materials['Width']['Plaster']
C_LR_dw_p = materials['Density']['Plaster'] * \
materials['Specific heat']['Plaster'] * \
surfaces['LR']['Dividingwall'] * materials['Width']['Plaster']
C_LR_dw_i = materials['Density']['Insulation'] * \
materials['Specific heat']['Insulation'] * \
surfaces['LR']['Dividingwall'] * materials['Width']['Insulation']
C_LR_d = materials['Density']['Door'] * \
materials['Specific heat']['Door'] * \
surfaces['LR']['Door'] * materials['Width']['Door']
C_LR_f = materials['Density']['Floor'] * \
materials['Specific heat']['Floor'] * \
surfaces['LR']['Floor'] * materials['Width']['Floor']
# zone 2 : BR
C_BR_w_c = materials['Density']['Concrete'] * \
materials['Specific heat']['Concrete'] * \
surfaces['BR']['Wall'] * materials['Width']['Concrete']
C_BR_w_i = materials['Density']['Insulation'] * \
materials['Specific heat']['Insulation'] * \
surfaces['BR']['Wall'] * materials['Width']['Insulation']
C_BR_w_p = materials['Density']['Plaster'] * \
materials['Specific heat']['Plaster'] * \
surfaces['BR']['Wall'] * materials['Width']['Plaster']
C_BR_dw_p = materials['Density']['Plaster'] * \
materials['Specific heat']['Plaster'] * \
surfaces['BR']['Dividingwall'] * materials['Width']['Plaster']
C_BR_dw_i = materials['Density']['Insulation'] * \
materials['Specific heat']['Insulation'] * \
surfaces['BR']['Dividingwall'] * materials['Width']['Insulation']
C_BR_f = materials['Density']['Floor'] * \
materials['Specific heat']['Floor'] * \
surfaces['BR']['Floor'] * materials['Width']['Floor']
# zone 3 : LR-BR
C_LRBR_dw_p = materials['Density']['Plaster'] * \
materials['Specific heat']['Plaster'] * \
surfaces['LR-BR']['Dividingwall'] * materials['Width']['Plaster']
C_LRBR_dw_i = materials['Density']['Insulation'] * \
materials['Specific heat']['Insulation'] * \
surfaces['LR-BR']['Dividingwall'] * materials['Width']['Insulation']
C_LRBR_d = materials['Density']['Door'] * \
materials['Specific heat']['Door'] * \
surfaces['LR-BR']['Door'] * materials['Width']['Door']
# air
C_LR_air = air['Density'] * air['Specific heat'] * Va_livingroom
C_BR_air = air['Density'] * air['Specific heat'] * Va_bathroom
# Incidence matrix A
# ------------------
A = np.zeros([60, 49])
# Flux directed to livingroom
A[0, 0] = 1
A[1, 0], A[1, 1] = -1, 1
A[2, 1], A[2, 2] = -1, 1
A[3, 2], A[3, 3] = -1, 1
A[4, 3], A[4, 4] = -1, 1
A[5, 4], A[5, 5] = -1, 1
A[6, 5], A[6, 6] = -1, 1
A[7, 6], A[7, 20] = -1, 1
A[8, 7] = 1
A[9, 7], A[9, 8] = -1, 1
A[10, 8], A[10, 9] = -1, 1
A[11, 9], A[11, 20] = -1, 1
A[12, 10] = 1
A[13, 10], A[13, 11] = -1, 1
A[14, 11], A[14, 12] = -1, 1
A[15, 12], A[15, 20] = -1, 1
A[16, 13] = 1
A[17, 13], A[17, 14] = -1, 1
A[18, 14], A[18, 15] = -1, 1
A[19, 15], A[19, 16] = -1, 1
A[20, 16], A[20, 17] = -1, 1
A[21, 17], A[21, 18] = -1, 1
A[22, 18], A[22, 19] = -1, 1
A[23, 19], A[23, 20] = -1, 1
A[24, 20] = 1
A[25, 20] = 1
A[26, 20], A[26, 21] = -1, 1
A[27, 21], A[27, 22] = -1, 1
A[28, 22], A[28, 23] = -1, 1
A[29, 23], A[29, 24] = -1, 1
A[30, 24], A[30, 25] = -1, 1
A[31, 25], A[31, 26] = -1, 1
A[32, 26], A[32, 27] = -1, 1
A[33, 27], A[33, 31] = -1, 1
A[34, 28] = 1
A[35, 28], A[35, 29] = -1, 1
A[36, 29], A[36, 30] = -1, 1
A[37, 30], A[37, 31] = -1, 1
# Flux directed to bathroom
A[38, 31], A[38, 32] = 1, -1
A[39, 32], A[39, 33] = 1, -1
A[40, 33], A[40, 34] = 1, -1
A[41, 34], A[41, 35] = 1, -1
A[42, 35], A[42, 36] = 1, -1
A[43, 36], A[43, 37] = 1, -1
A[44, 37], A[44, 38] = 1, -1
A[45, 38] = 1
A[46, 31], A[46, 39] = 1, -1
A[47, 39], A[47, 40] = 1, -1
A[48, 40], A[48, 41] = 1, -1
A[49, 41] = 1
A[50, 31], A[50, 42] = 1, -1
A[51, 42], A[51, 43] = 1, -1
A[52, 43], A[52, 44] = 1, -1
A[53, 44], A[53, 45] = 1, -1
A[54, 45], A[54, 46] = 1, -1
A[55, 46], A[55, 47] = 1, -1
A[56, 47], A[56, 48] = 1, -1
A[57, 48] = 1
A[58, 31] = 1
A[59, 31] = 1
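# Sanity checks on the incidence matrix: entries are -1/0/+1 and every branch
# (row) connects at most two temperature nodes:
assert A.shape == (60, 49)
assert np.all(np.isin(A, [-1, 0, 1]))
assert np.all(np.count_nonzero(A, axis=1) <= 2)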
# Conductance matrix G
# --------------------
G = np.zeros([60, 60])
# Livingroom
# wall (ext)
# convection
G[0, 0] = Gcv_LR_w['out'] # convection wall concrete out
# conduction
G[1, 1] = G[2, 2] = Gcd_LR_w_c # conduction wall concrete
G[3, 3] = G[4, 4] = Gcd_LR_w_i # conduction wall insulation
G[5, 5] = G[6, 6] = Gcd_LR_w_p # conduction wall plaster
# convection
G[7, 7] = Gcv_LR_w['in'] # convection wall plaster inside
# floor
# convection
G[8, 8] = Gcv_LR_f['in'] # convection floor temp fix
# conduction
G[9, 9] = G[10, 10] = Gcd_LR_f # conduction floor
# convection
G[11, 11] = Gcv_LR_f['in'] # convection floor inside
# door
# convection
G[12, 12] = Gcv_LR_d['in'] # convection door temp fix
# conduction
G[13, 13] = G[14, 14] = Gcd_LR_d # conduction door
# convection
G[15, 15] = Gcv_LR_d['in'] # convection door inside
# dividing wall
# convection
G[16, 16] = Gcv_LR_dw['in'] # convection d-wall plaster fix
# conduction
G[17, 17] = G[18, 18] = Gcd_LR_dw_p # conduction d-wall plaster
G[19, 19] = G[20, 20] = Gcd_LR_dw_i # conduction d-wall insulation
G[21, 21] = G[22, 22] = Gcd_LR_dw_p # conduction d-wall plaster
# convection
G[23, 23] = Gcv_LR_dw['in'] # convection wall plaster inside
# window + ventilation
# conduction
G[24, 24] = Gw_LR + Gv_LR # conduction w + venti
# renouvellement air
# conduction
G[25, 25] = Gr_LR # renouv air LR
# partition wall
# convection
G[26, 26] = Gcv_LRBR_dw['in'] # convection d-wall plaster in
# conduction
G[27, 27] = G[28, 28] = Gcd_LRBR_dw_p # conduction d-wall plaster
G[29, 29] = G[30, 30] = Gcd_LRBR_dw_i # conduction d-wall insulation
G[31, 31] = G[32, 32] = Gcd_LRBR_dw_p # conduction d-wall plaster
# convection
G[33, 33] = Gcv_LRBR_dw['in'] # convection wall plaster inside
# door
# convection
G[34, 34] = Gcv_LRBR_d['in'] # convection door temp fix
# conduction
G[35, 35] = G[36, 36] = Gcd_LRBR_d # conduction door
# convection
G[37, 37] = Gcv_LRBR_d['in'] # convection door inside
# Bathroom
# wall (ext)
# convection
G[45, 45] = Gcv_BR_w['out'] # convection wall concrete out
# conduction
G[44, 44] = G[43, 43] = Gcd_BR_w_c # conduction wall concrete
G[42, 42] = G[41, 41] = Gcd_BR_w_i # conduction wall insulation
G[40, 40] = G[39, 39] = Gcd_BR_w_p # conduction wall plaster
# convection
G[38, 38] = Gcv_BR_w['in'] # convection wall plaster inside
# floor
# convection
G[49, 49] = Gcv_BR_f['in'] # convection floor temp fix
# conduction
G[48, 48] = G[47, 47] = Gcd_BR_f # conduction floor
# convection
G[46, 46] = Gcv_BR_f['in'] # convection floor inside
# dividing wall
# convection
G[57, 57] = Gcv_BR_dw['in'] # convection d-wall plaster fix
# conduction
G[56, 56] = G[55, 55] = Gcd_BR_dw_p # conduction d-wall plaster
G[54, 54] = G[53, 53] = Gcd_BR_dw_i # conduction d-wall insulation
G[52, 52] = G[51, 51] = Gcd_BR_dw_p # conduction d-wall plaster
# convection
G[50, 50] = Gcv_BR_dw['in'] # convection wall plaster inside
# window + ventilation
# conduction
G[59, 59] = Gw_BR + Gv_BR # conduction w + venti
# renouvellement air
# conduction
G[58, 58] = Gr_BR # renouv air BR
# Capacity matrix C
# -----------------
C = np.zeros([49, 49])
C_BR_air = C_BR_dw_i = C_BR_w_i = C_LR_dw_i = C_LRBR_dw_i = 0
# C_BR_air = C_BR_dw_i = C_BR_w_i = C_LR_air = C_LR_dw_i = C_LR_w_i = C_LRBR_dw_i = 0
# Livingroom : LR
# wall (ext)
C[1, 1] = C_LR_w_c
C[3, 3] = C_LR_w_i
C[5, 5] = C_LR_w_p
# floor
C[8, 8] = C_LR_f
# dividingwall
C[14, 14] = C[18, 18] = C_LR_dw_p
C[16, 16] = C_LR_dw_i
# door
C[11, 11] = C_LR_d
# air
C[20, 20] = C_LR_air
# Partition wall : LR-BR
# dividingwall
C[22, 22] = C[26, 26] = C_LRBR_dw_p
C[24, 24] = C_LRBR_dw_i
# door
C[29, 29] = C_LRBR_d
# Bathroom
# wall (ext)
C[37, 37] = C_BR_w_c
C[35, 35] = C_BR_w_i
C[33, 33] = C_BR_w_p
# floor
C[40, 40] = C_BR_f
# dividingwall
C[47, 47] = C[43, 43] = C_BR_dw_p
C[45, 45] = C_BR_dw_i
# air
C[31, 31] = C_BR_air
# Vector of temperature sources b
# -------------------------------
b = np.zeros(60)
b[[0, 8, 12, 16, 24, 25, 45, 49, 57, 58, 59]] = 1
# Vector of heat sources f
# ------------------------
f = np.zeros(49)
f[[0, 6, 8, 19, 20, 21, 27, 31, 32, 38, 40, 42]] = 1
# Vector of outputs
# -----------------
y = np.zeros(49) # 1 si on veut que la T sorte, 0 sinon
y[[20, 31]] = 1
# Input vector
# ------------
u = np.hstack([b[np.nonzero(b)], f[np.nonzero(f)]])
# Thermal circuit -> state-space
# ==============================
[As, Bs, Cs, Ds] = dm4bem.tc2ss(A, G, b, C, f, y)
# Maximum time-step
dtmax = min(-2. / np.linalg.eig(As)[0])
print(f'Maximum time step: {dtmax:.2f} s')
dt = 10
# dt = 100
# dt = 360
print(f'Time step: {dt:.2f} s')
# Step response
# -------------
duration = 3600 * 24 * 2 # [s]
# number of steps
n = int(np.floor(duration / dt))
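# A minimal explicit-Euler step-response sketch (assumed inputs: a 10 degC step
# on all temperature sources, zero heat-flow sources; illustrative, not the
# full study case):
n_states = As.shape[0]
u_step = np.zeros((Bs.shape[1], n))
u_step[:11, :] = 10.0                # the 11 temperature sources (vector b) come first in u
theta_exp = np.zeros((n_states, n))
I_dtAs = np.eye(n_states) + dt * As
for k in range(n - 1):
    theta_exp[:, k + 1] = I_dtAs @ theta_exp[:, k] + dt * (Bs @ u_step[:, k])
y_step = Cs @ theta_exp + Ds @ u_step   # indoor air temperatures (LR, BR) over time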
import numpy as np
from scipy.linalg import block_diag
import math
class rJoint:
def __init__(self, alpha, a, theta, d, type, inertia, m, r):
self.alpha = alpha
self.a = a
self.theta = theta
self.d = d
self.type = type
self.inertia = inertia
self.m = m
self.r = r
class cartesian:
def __init__(self, x, y, z, roll, pitch, yaw):
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
class fkine:
def __init__(self, T, A, Aout, transl, R, rpy):
self.T = T
self.A = A
self.Aout = Aout
self.transl = transl
self.R = R
self.rpy = rpy
class impedanceController:
def __init__(self):
self.Kd = np.diag(np.array([125,125,125,1,1,1]))
self.Bd = np.diag(np.array([85,85,85,165,165,165]))
self.Md = np.diag(np.array([15, 15, 15, 1, 1, 1]))
# Controllers
def output(self, x, xd, xdd, xc, xcd, F):
Mdinv = np.linalg.inv(self.Md)
damper = np.dot(self.Bd,(xcd - xd))
spring = np.dot(self.Kd,(xc - x))
ax = xdd - np.dot(Mdinv,(damper + spring + F))
return ax
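# Example call with toy vectors (illustrative gains only; the controller
# returns the commanded Cartesian acceleration):
if __name__ == "__main__":
    ctrl = impedanceController()
    z6 = np.zeros(6)
    ax_demo = ctrl.output(x=z6, xd=z6, xdd=z6, xc=np.ones(6)*0.01, xcd=z6, F=z6)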
class Robot:
def __init__(self, joints, ndof, dh):
self.joints = joints
self.ndof = ndof
self.dh = dh
def inverseDynamics(self, qc, qcdot, qcddot, grav):
qc = qc.reshape((1,self.ndof))
qcdot = qcdot.reshape((1,self.ndof))
qcddot = qcddot.reshape((1,self.ndof))
if self.dh.lower() == 'mdh':
grav = grav.reshape(3)
Q = np.ravel(self.mdh_invdyn(qc, qcdot, qcddot, grav))
else:
grav = grav.reshape((3,1)) # May need to fix this
Q = np.ravel(self.invdyn(qc, qcdot, qcddot, grav))
return Q
def forwardKinematics(self, q):
if self.dh.lower() == 'mdh':
T,A,Aout = self.mdh_Transform(q)
else:
T,A,Aout = self.Transform(q)
transl = self.t2transl(T)
R = self.t2rot(T)
r,p,y = self.r2rpy(R)
rpy = [r,p,y]
kinematics = fkine(T, A, Aout, transl, R, rpy)
return kinematics
def jacobian(self, q):
J = self.calcJac(q)
return J
def jacobianDot(self, q, qd):
Jd = self.calcJacDot(q, qd)
return Jd
# Kinematics
def mdh_Transform(rb, q):
for j in range(rb.ndof):
rb.joints[j].theta = q[j]
A = [0 for i in range(rb.ndof)]
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
th[i] = rb.joints[i].theta
d[i] = rb.joints[i].d
T = np.identity(4)
Aout = []
for i in range(rb.ndof):
ct = np.cos(th[i])
st = np.sin(th[i])
ca = np.cos(alp[i])
sa = np.sin(alp[i])
A[i] = np.array([[ct, -st, 0, a[i]],
[(st * ca), (ct * ca), -sa, (-d[i] * sa)],
[(st * sa), (ct * sa), ca, (d[i] * ca)],
[0, 0, 0, 1]])
Aout.append(np.dot(T, A[i]))
T = np.dot(T, A[i])
return T, A, Aout
def Transform(rb, q):
for j in range(rb.ndof):
rb.joints[j].theta = q[j]
A = [0 for i in range(rb.ndof)]
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
# A = np.zeros(2)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
th[i] = rb.joints[i].theta
d[i] = rb.joints[i].d
T = np.identity(4)
Aout = []
for i in range(rb.ndof):
A[i] = np.array([[np.cos(th[i]), -np.sin(th[i]) * np.cos(alp[i]), np.sin(th[i]) * np.sin(alp[i]), a[i] * np.cos(th[i])],
[np.sin(th[i]), np.cos(th[i]) * np.cos(alp[i]), -np.cos(th[i]) * np.sin(alp[i]), a[i] * np.sin(th[i])],
[0, np.sin(alp[i]), np.cos(alp[i]), d[i]],
[0, 0, 0, 1]])
Aout.append(np.dot(T, A[i]))
T = np.dot(T, A[i])
return T, A, Aout
def t2transl(self, T):
transl = np.ravel(T[:3, 3])
return transl
def t2rot(self, T):
R = T[:3, :3]
return R
def r2eul(self, R):
if (abs(R[0,2]) < np.finfo(float).eps and abs(R[1,2]) < np.finfo(float).eps):
theta = 0
sp = 0
cp = 1
phi = np.arctan2(cp*R[0,2] + sp*R[1,2], R[2,2])
psi = np.arctan2(-sp*R[0,0] + cp*R[1,0], -sp*R[0,1] + cp*R[1,1])
else:
# sin(theta) > 0
#theta = np.arctan2(R[2,2], np.sqrt(1 - (R[2,2]**2)))
theta = np.arctan2(R[1,2],R[0,2])
sp = np.sin(theta)
cp = np.cos(theta)
phi = np.arctan2(cp*R[0,2] + sp*R[1,2], R[2,2])
psi = np.arctan2(-sp*R[0,0] + cp*R[1,0], -sp*R[0,1] + cp*R[1,1])
return theta, phi, psi
def isclose(self, x, y, rtol=1.e-5, atol=1.e-8):
return abs(x-y) <= atol + rtol * abs(y)
def r2rpy(self, R):
'''
From a paper by <NAME> (undated),
"Computing Euler angles from a rotation matrix
'''
phi = 0.0
if self.isclose(R[2,0],-1.0):
theta = math.pi/2.0
psi = math.atan2(R[0,1],R[0,2])
elif self.isclose(R[2,0],1.0):
theta = -math.pi/2.0
psi = math.atan2(-R[0,1],-R[0,2])
else:
theta = -math.asin(R[2,0])
cos_theta = math.cos(theta)
psi = math.atan2(R[2,1]/cos_theta, R[2,2]/cos_theta)
phi = math.atan2(R[1,0]/cos_theta, R[0,0]/cos_theta)
return psi, theta, phi
def calcInverseKin(self, X):
# Pre solved for the 2-DOF Planar robot.
tx = X[0]
ty = X[1]
tz = X[2]
q1 = 2*np.arctan((7*ty + (- 25*tx**4 - 50*tx**2*ty**2 + 49*tx**2 - 25*ty**4 + 49*ty**2)**(1/2))/(5*tx**2 + 7*tx + 5*ty**2))
q2 = -2*np.arctan((- 25*tx**2 - 25*ty**2 + 49)**(1/2)/(5*(tx**2 + ty**2)**(1/2)))
if np.isnan(q1):
q1 = 0
if np.isnan(q2):
q2 = 0
qc = np.array([q1,q2])
return qc
def calcQd(rb, Xd, qc, rho):
J = rb.calcJac(qc)
Jt = np.transpose(J)
inner = np.linalg.inv( np.dot(J,Jt) + np.dot(np.identity(6),rho) )
Jinv = np.dot(Jt,inner)
TestSingularity = np.linalg.det(J[:2,:2])
if(TestSingularity < (1e-9)) and (TestSingularity > -(1e-9)):
qd = np.array([0,0])
print("in here qd")
else:
qd = np.dot(Jinv[:2,:2],Xd[:2])
return qd
def calcQdd(rb, Xdd, qc, qd):
J = rb.calcJac(qc)
Jd = rb.calcJacDot(qc, qd)
Jdq = np.dot(Jd,qd)
kine = rb.forwardKinematics(qc)
rpy = kine.rpy
A = rb.rpy2Ja(rpy[0],rpy[1],rpy[2])
B = block_diag(np.eye(3),np.linalg.inv(A))
# Jadq = np.dot(B,Jdq)
Ja = np.dot(B,J)
Jpinv = rb.pinv(Ja)
qdd = np.dot(Jpinv, (Xdd - Jdq))
return qdd
def calcQdd3(rb, Xdd, qc, qd):
J = rb.calcJac(qc)
Jd = rb.calcJacDot(qc, qd)
Jdq = np.dot(Jd,qd)
kine = rb.forwardKinematics(qc)
rpy = kine.rpy
A = rb.rpy2Ja(rpy[0],rpy[1],rpy[2])
B = block_diag(np.eye(3),np.linalg.inv(A))
# Jadq = np.dot(B,Jdq)
Ja = np.dot(B,J)
Jpinv = rb.pinv(Ja)
qdd = np.dot(Jpinv[:,:3], (Xdd - Jdq[:3]))
return qdd
def calcXd(rb, qc, qd):
J = rb.calcJac(qc)
kine = rb.forwardKinematics(qc)
rpy = kine.rpy
A = rb.rpy2Ja(rpy[0],rpy[1],rpy[2])
B = block_diag(np.eye(3),np.linalg.inv(A))
# Jadq = np.dot(B,Jdq)
Ja = np.dot(B,J)
xd = np.dot(Ja,qd)
return xd
def calcXd3(rb, qc, qd):
J = rb.calcJac(qc)
# kine = rb.forwardKinematics(qc)
# rpy = kine.rpy
# A = rb.rpy2Ja(rpy[0],rpy[1],rpy[2])
# B = block_diag(np.eye(3),np.linalg.inv(A))
# # Jadq = np.dot(B,Jdq)
# Ja = np.dot(B,J)
xd = np.dot(J,qd)
xd = xd[:3]
return xd
def calcJac(rb, q):
J = np.zeros((6, rb.ndof))
kine = rb.forwardKinematics(q)
T = kine.T
Aout = kine.Aout
# To simplify the readability:
J1v = np.cross( np.array([0, 0, 1]), T[:3,3])
J1w = np.array([0, 0, 1])
J1 = np.concatenate((J1v,J1w))
J[:,0] = J1
for i in range(1,rb.ndof):
Aframe = Aout[i-1]
Jv = np.cross( (Aframe[:3, 2]), (T[:3, 3] - Aframe[:3, 3]), axis=0)
Jw = Aframe[:3, 2]
Jtemp = np.concatenate((Jv, Jw))
J[:,i] = Jtemp
return J
def calcJacDot(rb, q, qd):
J = np.zeros((6, rb.ndof))
kine = rb.forwardKinematics(q)
T = kine.T
Aout = kine.Aout
# To simplify the readability (Jacobian for the first joint):
J1v = np.cross(np.array([0, 0, 1]), T[:3,3])
J1w = np.array([0, 0, 1])
J1 = np.concatenate((J1v,J1w))
J[:,0] = J1
# Jacobian computation
# Declaring variables
Jvi, Jwi = np.zeros((3, rb.ndof)), np.zeros((3, rb.ndof))
Jvi[:,0], Jwi[:,0] = J1v, J1w
w, z = [], []
z.append( np.array([0, 0, 1]).reshape((3,1)) )
w.append( np.array([0, 0, 1]).reshape((3,1)) )
for i in range(1,rb.ndof):
Aframe = Aout[i-1]
z.append( np.array(Aframe[:3, 2]).reshape((3,1)) )
Jv = np.cross( (Aframe[:3, 2]), (T[:3, 3] - Aframe[:3, 3]), axis=0)
Jw = Aframe[:3, 2]
Jvi[:,i] = Jv
Jwi[:,i] = Jw
Jtemp = np.concatenate((Jv, Jw))
J[:,i] = Jtemp
# w and z (used for Jacobian derivative computation)
# Note to self, be aware of indexing.
wtemp = w[len(w)-1] + np.dot(z[len(z) - 2], qd[i-1])
w.append(wtemp)
# Jacobian derivative computation
beta = np.array(np.dot(Jvi, qd)).reshape((3,1))
Jd = np.zeros((6, rb.ndof))
for i in reversed(range(1, rb.ndof)):
Aframe = Aout[i-1]
zd = np.cross(w[i-1], z[i-1], axis = 0)
alpha = np.array([0, 0, 0]).reshape((3,1))
for j in range(i):
alpha = alpha + np.dot( np.cross(z[j+1-1], np.array(T[:3, 3] - Aframe[:3, 3]).reshape((3,1)), axis=0), qd[j])
# print "alpha", (alpha), "\n\n"
Jvd = np.cross( zd, (T[:3, 3] - Aframe[:3, 3]), axis=0) + np.cross(z[i-1], (alpha + beta), axis=0)
Jwd = zd
Jtemp = np.concatenate((Jvd, Jwd))
Jd[:,i] = np.ravel(Jtemp)
beta = beta + np.dot(Jvi[:,i-1], qd[i-1]).reshape((3,1))
# cross z0 x beta
Jvd = np.cross(np.array([0, 0, 1]).reshape((3,1)), beta, axis=0)
Jwd = np.array([0, 0, 0]).reshape((3,1))
Jtemp = np.concatenate((Jvd, Jwd))
Jd[:,0] = np.ravel(Jtemp)
return Jd
def eul2Ja(self, phi,theta,psi):
Ja = np.array([[ 0, -np.sin(phi), np.cos(phi) * np.sin(theta)],
[0, np.cos(phi), np.sin(phi) * np.sin(theta)],
[1, 0, np.cos(theta) ]])
return Ja
def rpy2Ja(self, r,p,y):
Ja = np.array([[ 1, 0, np.sin(p)],
[0, np.cos(r), -np.cos(p) * np.sin(r)],
[0, np.sin(r), np.cos(p) * np.cos(r)]])
return Ja
def pinv(self, J):
u, s, vh = np.linalg.svd(J.T, full_matrices=True)
rho = 4
S2 = np.zeros_like(J.T)
for i in range(len(s)):
S2[i,i] = s[i] / (s[i]**2 + rho**2)
JpinvT = np.dot(np.dot(vh.T,S2.T),u.T)
Jpinv = JpinvT.T
return Jpinv
# Dynamics
def mdh_calc_transformation(rb, From, to, qc):
T = np.identity(4)
From = From
to = to
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
th[i] = qc[i]
d[i] = rb.joints[i].d
for i in range(From, to):
ct = np.cos(th[i] + 0)
st = np.sin(th[i] + 0)
ca = np.cos(alp[i])
sa = np.sin(alp[i])
A = np.array([[ct, -st, 0, a[i]],
[(st * ca), (ct * ca), -sa, (-d[i] * sa)],
[(st * sa), (ct * sa), ca, (d[i] * ca)],
[0, 0, 0, 1]])
T = np.dot(T, A)
# print(A)
return T
def mdh_invdyn(rb, qc, qcdot, qcddot, grav):
z0 = np.array([0, 0, 1])
R = np.identity(3)
Q = np.zeros((rb.ndof, 1))
grav = grav.reshape(3)
w = np.zeros((3))
wdot = np.zeros((3))
vdot = grav
Fm = np.empty((3,0))
Nm = np.empty((3,0))
for k in range(1):
q = qc[k, :].reshape((rb.ndof,1))
qdot = qcdot[k, :].reshape((rb.ndof,1))
qddot = qcddot[k, :].reshape((rb.ndof,1))
N_DOFS = rb.ndof
# Forward recursion
for i in range(N_DOFS):
T = rb.mdh_calc_transformation(i, i+1, q)
R = T[:3,:3]
p = np.array([rb.joints[i].a,
-rb.joints[i].d * np.sin(rb.joints[i].alpha),
rb.joints[i].d * np.cos(rb.joints[i].alpha)])
wdot_ = (np.dot(R.T, wdot) +
np.dot(z0,qddot[i,k]) +
np.cross(np.dot(R.T,w), np.dot(z0, qdot[i,k])))
w_ = (np.dot(R.T,w) +
np.dot(z0, qdot[i,k]))
vdot_ = np.dot(R.T, (vdot +
np.cross(wdot, p) +
np.cross(w, np.cross(w, p))))
wdot = wdot_
w = w_
vdot = vdot_
vcdot = (vdot + np.cross(wdot, rb.joints[i].r.reshape(3)) +
(np.cross(w, np.cross(w, rb.joints[i].r.reshape(3)))) )
F = np.dot(rb.joints[i].m, vcdot)
N = np.dot(rb.joints[i].inertia, wdot) + np.cross(w, np.dot(rb.joints[i].inertia, w))
Fm = np.append(Fm, F.reshape((3,1)), axis=1)
Nm = np.append(Nm, N.reshape((3,1)), axis=1)
n = np.zeros(3)
f = np.zeros(3)
# Backward recursion
for i in reversed(range(N_DOFS)):
if i+1 < N_DOFS:
p = np.array([[rb.joints[i+1].a], [-rb.joints[i+1].d * np.sin(rb.joints[i+1].alpha)],[rb.joints[i+1].d * np.cos(rb.joints[i+1].alpha)]])
T = rb.mdh_calc_transformation(i+1, i+2, q)
R = T[:3, :3]
else:
R = np.eye(3)
p = np.zeros(3).reshape(3,1)
n_ =(np.dot(R, n) +
np.cross(rb.joints[i].r.reshape(3), Fm[:,i]) +
np.cross(p.reshape(3), np.dot(R,f)) +
Nm[:,i] )
f_ = np.dot(R, f) + Fm[:,i]
n = n_
f = f_
Q[i,k] = np.dot(n.T, z0)
return Q
def calc_transformation(rb, From, to, qc):
T = np.identity(4)
From = From +1
to = to +1
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
# th[i] = rb.joints[i].theta
# Since it is revolute:
th[i] = qc[i]
d[i] = rb.joints[i].d
for i in range(From, to):
ct = np.cos(th[i] + 0)
st = np.sin(th[i] + 0)
ca = np.cos(alp[i])
sa = np.sin(alp[i])
A = np.array([[ct, -st * ca, st*sa, a[i]*ct],
[st, ct * ca, -ct * sa, a[i] * st],
[0, sa, ca, d[i]],
[0, 0, 0, 1]])
T = np.dot(T, A)
# print(A)
return T
def invdyn(rb, qc, qcdot, qcddot, grav):
z0 = np.array([[0], [0], [1]])
R = np.identity(3)
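# A minimal 2-link planar example (assumed unit link lengths and placeholder
# inertial data; standard DH convention):
if __name__ == "__main__":
    def link(th):
        return rJoint(alpha=0.0, a=1.0, theta=th, d=0.0, type='revolute',
                      inertia=np.eye(3), m=1.0, r=np.zeros((3, 1)))
    rb = Robot(joints=[link(0.0), link(0.0)], ndof=2, dh='dh')
    kin = rb.forwardKinematics(np.array([np.pi/4, -np.pi/4]))
    # kin.transl is the end-effector position, kin.rpy its orientation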
import numpy as np
from ..utils import product_matrix_vector
def fast_wilcoxon(X, y=None, zero_method='wilcox', correction=False,
n_jobs=-1):
from mne.parallel import parallel_func
if y is not None:
X -= y
dims = X.shape
X = X.reshape(len(X), -1)
parallel, p_time_gen, n_jobs = parallel_func(_loop_wilcoxon, n_jobs)
n_chunks = np.min([n_jobs, X.shape[1]])
out = parallel(p_time_gen(X[..., chunk],
zero_method=zero_method, correction=correction)
for chunk in np.array_split(range(X.shape[1]), n_chunks))
stats, p_val = map(list, zip(*out))
stats = np.hstack(stats).reshape(dims[1:])
p_val = np.hstack(p_val).reshape(dims[1:])
return stats, p_val
def _loop_wilcoxon(X, zero_method, correction):
from scipy.stats import wilcoxon
p_val = np.ones(X.shape[1])
stats = np.ones(X.shape[1])
for ii, x in enumerate(X.T):
stats[ii], p_val[ii] = wilcoxon(x)
return stats, p_val
def corr_linear_circular(X, alpha):
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licence : BSD-simplified
"""
Parameters
----------
X : numpy.array, shape (n_angles, n_dims)
The linear data
alpha : numpy.array, shape (n_angles,)
The angular data (if n_dims == 1, repeated across all x dimensions)
Returns
-------
R : numpy.array, shape (n_dims)
R values
R2 : numpy.array, shape (n_dims)
R square values
p_val : numpy.array, shape (n_dims)
P values
Adapted from:
Circular Statistics Toolbox for Matlab
By <NAME>, 2009
<EMAIL> - www.kyb.mpg.de/~berens/circStat.html
Equantion 27.47
"""
from scipy.stats import chi2
import numpy as np
# computes correlation for sin and cos separately
rxs = repeated_corr(X, np.sin(alpha))
rxc = repeated_corr(X, np.cos(alpha))
rcs = repeated_corr(np.sin(alpha), np.cos(alpha))
# tile alpha across multiple dimension without requiring memory
if X.ndim > 1 and alpha.ndim == 1:
rcs = rcs[:, np.newaxis]
# Adapted from equation 27.47
R = (rxc ** 2 + rxs ** 2 - 2 * rxc * rxs * rcs) / (1 - rcs ** 2)
# JR adhoc way of having a sign....
R = np.sign(rxs) * np.sign(rxc) * R
R2 = np.sqrt(R ** 2)
# Get degrees of freedom
n = len(alpha)
pval = 1 - chi2.cdf(n * R2, 2)
return R, R2, pval
def corr_circular_linear(alpha, X):
# Authors: <NAME> <<EMAIL>>
#
# Licence : BSD-simplified
"""
Parameters
----------
alpha : numpy.array, shape (n_angles,)
The angular data (if n_dims == 1, repeated across all x dimensions)
X : numpy.array, shape (n_angles, n_dims)
The linear data
Returns
-------
R : numpy.array, shape (n_dims)
R values
R2 : numpy.array, shape (n_dims)
R square values
p_val : numpy.array, shape (n_dims)
P values
Adapted from:
Circular Statistics Toolbox for Matlab
By <NAME>, 2009
<EMAIL> - www.kyb.mpg.de/~berens/circStat.html
Equantion 27.47
"""
from scipy.stats import chi2
from jr.utils import pairwise
import numpy as np
# computes correlation for sin and cos separately
# WIP Applies repeated correlation if X is vector
# TODO: deals with non repeated correlations (X * ALPHA)
if alpha.ndim > 1:
rxs = repeated_corr(np.sin(alpha), X)
rxc = repeated_corr(np.cos(alpha), X)
rcs = np.zeros_like(alpha[0, :])
rcs = pairwise(np.sin(alpha), np.cos(alpha), func=_loop_corr,
n_jobs=-1)
else:
# WIP Applies repeated correlation if alpha is vector
rxs = repeated_corr(X, np.sin(alpha))
rxc = repeated_corr(X, np.cos(alpha))
rcs = repeated_corr(np.sin(alpha), np.cos(alpha))
# Adapted from equation 27.47
R = (rxc ** 2 + rxs ** 2 - 2 * rxc * rxs * rcs) / (1 - rcs ** 2)
# JR adhoc way of having a sign....
R = np.sign(rxs) * np.sign(rxc) * R
R2 = np.sqrt(R ** 2)
# Get degrees of freedom
n = len(X)
pval = 1 - chi2.cdf(n * R2, 2)
return R, R2, pval
def _loop_corr(X, Y):
R = np.zeros(X.shape[1])
for ii, (x, y) in enumerate(zip(X.T, Y.T)):
R[ii] = repeated_corr(x, y)
return R
def repeated_corr(X, y, dtype=float):
"""Computes pearson correlations between a vector and a matrix.
Adapted from Jona-Sassenhagen's PR #L1772 on mne-python.
Parameters
----------
X : np.array, shape (n_samples, n_measures)
Data matrix onto which the vector is correlated.
y : np.array, shape (n_samples)
Data vector.
dtype : type, optional
Data type used to compute correlation values to optimize memory.
Returns
-------
rho : np.array, shape (n_measures)
"""
if not isinstance(X, np.ndarray):
X = np.array(X)
if X.ndim == 1:
X = X[:, None]
shape = X.shape
X = np.reshape(X, [shape[0], -1])
if X.ndim not in [1, 2] or y.ndim != 1 or X.shape[0] != y.shape[0]:
raise ValueError('y must be a vector, and X a matrix with an equal'
'number of rows.')
if X.ndim == 1:
X = X[:, None]
ym = np.array(y.mean(0), dtype=dtype)
Xm = np.array(X.mean(0), dtype=dtype)
y -= ym
X -= Xm
y_sd = y.std(0, ddof=1)
X_sd = X.std(0, ddof=1)[:, None if y.shape == X.shape else Ellipsis]
R = (np.dot(y.T, X) / float(len(y) - 1)) / (y_sd * X_sd)
R = np.reshape(R, shape[1:])
# cleanup variable changed in place
y += ym
X += Xm
return R
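# Quick consistency check against np.corrcoef on random data:
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    Xd, yd = rng.randn(50, 4), rng.randn(50)
    r_fast = repeated_corr(Xd, yd)
    r_ref = np.array([np.corrcoef(Xd[:, j], yd)[0, 1] for j in range(4)])
    assert np.allclose(r_fast, r_ref)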
def repeated_spearman(X, y, dtype=None):
"""Computes spearman correlations between a vector and a matrix.
Parameters
----------
X : np.array, shape (n_samples, n_measures ...)
Data matrix onto which the vector is correlated.
y : np.array, shape (n_samples)
Data vector.
dtype : type, optional
Data type used to compute correlation values to optimize memory.
Returns
-------
rho : np.array, shape (n_measures)
"""
from scipy.stats import rankdata
if not isinstance(X, np.ndarray):
X = np.array(X)
if X.ndim == 1:
X = X[:, None]
shape = X.shape
X = np.reshape(X, [shape[0], -1])
if X.ndim not in [1, 2] or y.ndim != 1 or X.shape[0] != y.shape[0]:
raise ValueError('y must be a vector, and X a matrix with an equal'
'number of rows.')
# Rank
X = np.apply_along_axis(rankdata, 0, X)
y = np.apply_along_axis(rankdata, 0, y)
# Double rank to ensure that normalization step of compute_corr
# (X -= mean(X)) remains an integer.
X *= 2
y *= 2
X = np.array(X, dtype=dtype)
y = np.array(y, dtype=dtype)
R = repeated_corr(X, y, dtype=type(y[0]))
R = np.reshape(R, shape[1:])
return R
def corr_circular(ALPHA1, alpha2, axis=0):
""" Circular correlation coefficient for two circular random variables.
Input:
------
ALPHA1 : np.array, shape[axis] = n
The matrix
alpha2 : np.array, shape (n), or shape == ALPHA1.shape
Vector or matrix
axis : int
The axis used to estimate correlation
Returns
-------
Y : np.array, shape == X.shape
Adapted from pycircstat by <NAME> :
1. Less memory consuming than original
2. supports ALPHA1 as matrix and alpha2 as vector
https://github.com/circstat/pycircstat
References: [Jammalamadaka2001]_
"""
# center data on circular mean
def sin_center(alpha):
m = np.arctan2(np.mean(np.sin(alpha), axis=axis),
np.mean(np.cos(alpha), axis=axis))
return np.sin((alpha - m) % (2 * np.pi))
sin_alpha1 = sin_center(ALPHA1)
sin_alpha2 = sin_center(alpha2)
# compute correlation coeffcient from p. 176
if sin_alpha1.ndim == sin_alpha2.ndim:
num = np.sum(sin_alpha1 * sin_alpha2, axis=axis)
den = np.sqrt(np.sum(sin_alpha1 ** 2, axis=axis) *
np.sum(sin_alpha2 ** 2, axis=axis))
else:
num = np.sum(product_matrix_vector(sin_alpha1, sin_alpha2, axis=axis))
den = np.sqrt(np.sum(sin_alpha1 ** 2, axis=axis) *
np.sum(sin_alpha2 ** 2))
return num / den
def robust_mean(X, axis=None, percentile=[5, 95]):
X = np.array(X)
axis_ = axis
# force axis to be 0 for facilitation
if axis is not None and axis != 0:
X = np.transpose(X, [axis] + list(range(0, axis)) + list(range(axis+1, X.ndim)))
axis_ = 0
mM = np.percentile(X, percentile, axis=axis_)
indices_min = np.where((X - mM[0][np.newaxis, ...]) < 0)
indices_max = np.where((X - mM[1][np.newaxis, ...]) > 0)
X[indices_min] = np.nan
X[indices_max] = np.nan
m = np.nanmean(X, axis=axis_)
return m
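# Illustration: a single large outlier barely moves the robust mean
# (values outside the 5th-95th percentile band are ignored):
if __name__ == "__main__":
    vals = np.concatenate([np.ones(99), [1000.0]])
    print(np.mean(vals), robust_mean(vals))  # ~10.99 vs ~1.0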
def fast_mannwhitneyu(Y, X, use_continuity=True, n_jobs=-1):
from mne.parallel import parallel_func
X = np.array(X)
Y = np.array(Y)
nx, ny = len(X), len(Y)
dims = X.shape
X = np.reshape(X, [nx, -1])
Y = np.reshape(Y, [ny, -1])
parallel, p_time_gen, n_jobs = parallel_func(_loop_mannwhitneyu, n_jobs)
n_chunks = np.min([n_jobs, X.shape[1]])
chunks = np.array_split(range(X.shape[1]), n_chunks)
out = parallel(p_time_gen(X[..., chunk],
Y[..., chunk], use_continuity=use_continuity)
for chunk in chunks)
# Unpack estimators into time slices X folds list of lists.
U, p_value = map(list, zip(*out))
U = np.hstack(U).reshape(dims[1:])
p_value = np.hstack(p_value).reshape(dims[1:])
AUC = U / (nx * ny)
# XXX FIXME this introduces a bug
# # correct directionality of U stats imposed by mannwhitneyu
# if nx > ny:
# AUC = 1 - AUC
return U, p_value, AUC
def _loop_mannwhitneyu(X, Y, use_continuity=True):
n_col = X.shape[1]
U, P = np.zeros(n_col), np.zeros(n_col)
for ii in range(n_col):
try:
U[ii], P[ii] = mannwhitneyu(X[:, ii], Y[:, ii], use_continuity)
except ValueError as e:
if 'All numbers are identical' in str(e):
U[ii], P[ii] = .5 * len(X) * len(Y), 1.
else:
raise
return U, P
def dPrime(hits, misses, fas, crs):
from scipy.stats import norm
from math import exp, sqrt
Z = norm.ppf
hits, misses, fas, crs = float(hits), float(misses), float(fas), float(crs)
# From <NAME> : lindeloev.net/?p=29
# Floors an ceilings are replaced by half hits and half FA's
halfHit = 0.5 / (hits + misses)
halfFa = 0.5 / (fas + crs)
# Calculate hitrate and avoid d' infinity
hitRate = hits / (hits + misses)
if hitRate == 1:
hitRate = 1 - halfHit
if hitRate == 0:
hitRate = halfHit
# Calculate false alarm rate and avoid d' infinity
faRate = fas/(fas+crs)
if faRate == 1:
faRate = 1 - halfFa
if faRate == 0:
faRate = halfFa
# Return d', beta, c and Ad'
out = {}
out['d'] = Z(hitRate) - Z(faRate)
out['beta'] = exp((Z(faRate)**2 - Z(hitRate)**2)/2)
out['c'] = -(Z(hitRate) + Z(faRate))/2
out['Ad'] = norm.cdf(out['d']/sqrt(2))
return out
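# Example: 20 hits, 5 misses, 8 false alarms, 17 correct rejections
# gives d' ~ 1.31 and a slightly liberal criterion (c < 0):
if __name__ == "__main__":
    res = dPrime(20, 5, 8, 17)
    print(res['d'], res['c'])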
def mannwhitneyu(x, y, use_continuity=True):
"""Adapated from scipy.stats.mannwhitneyu but includes direction of U"""
from scipy.stats import rankdata, tiecorrect
from scipy.stats import distributions
from numpy import asarray
x = asarray(x)
y = asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
if use_continuity:
# normal approximation for prob calc with continuity correction
z = abs((u1 - 0.5 - n1*n2/2.0) / sd)
else:
z = abs((u1 - n1*n2/2.0) / sd) # normal approximation for prob calc
return u2, distributions.norm.sf(z)
def nested_analysis(X, df, condition, function=None, query=None,
single_trial=False, y=None, n_jobs=-1):
""" Apply a nested set of analyses.
Parameters
----------
X : np.array, shape(n_samples, ...)
Data array.
df : pandas.DataFrame
Condition DataFrame
condition : str | list
If string, get the samples for each unique value of df[condition]
If list, nested call nested_analysis.
query : str | None, optional
To select a subset of trial using pandas.DataFrame.query()
function : function
Computes across list of evoked. Must be of the form:
function(X[:], y[:])
y : np.array, shape(n_conditions)
n_jobs : int
Number of core to compute the function. Defaults to -1.
Returns
-------
scores : np.array, shape(...)
The results of the function
sub : dict()
Contains results of sub levels.
"""
import numpy as np
from jr.utils import pairwise
if isinstance(condition, str):
# Subselect data using pandas.DataFrame queries
sel = range(len(X)) if query is None else df.query(query).index
X = X.take(sel, axis=0)
y = np.array(df[condition][sel])
# Find unique conditions
values = list()
for ii in np.unique(y):
if (ii is not None) and (ii not in [np.nan]):
try:
if np.isnan(ii):
continue
else:
values.append(ii)
except TypeError:
values.append(ii)
# Subsubselect for each unique condition
y_sel = [np.where(y == value)[0] for value in values]
# Mean condition:
        X_mean = np.zeros((len(y_sel),) + X.shape[1:])
y_mean = np.zeros(len(y_sel))
for ii, sel_ in enumerate(y_sel):
X_mean[ii, ...] = np.mean(X[sel_, ...], axis=0)
if isinstance(y[sel_[0]], str):
y_mean[ii] = ii
else:
y_mean[ii] = y[sel_[0]]
if single_trial:
            X = X.take(np.hstack(y_sel), axis=0)  # NOTE: ERRORS COME FROM HERE
y = y.take(np.hstack(y_sel), axis=0)
else:
X = X_mean
y = y_mean
# Store values to keep track
sub_list = dict(X=X_mean, y=y_mean, sel=sel, query=query,
condition=condition, values=values,
single_trial=single_trial)
elif isinstance(condition, list):
# If condition is a list, we must recall the function to gather
# the results of the lower levels
sub_list = list()
X_list = list() # FIXME use numpy array
for subcondition in condition:
scores, sub = nested_analysis(
X, df, subcondition['condition'], n_jobs=n_jobs,
function=subcondition.get('function', None),
query=subcondition.get('query', None))
X_list.append(scores)
sub_list.append(sub)
X = np.array(X_list)
if y is None:
y = np.arange(len(condition))
    if len(y) != len(X):
        raise ValueError('X and y must be of identical shape: '
                         '%s <> %s' % (len(X), len(y)))
sub_list = dict(X=X, y=y, sub=sub_list, condition=condition)
# Default function
function = _default_analysis if function is None else function
scores = pairwise(X, y, function, n_jobs=n_jobs)
return scores, sub_list
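# Usage sketch (hypothetical data): contrast 20 trials along one condition.
#   import pandas as pd
#   X = np.random.randn(20, 5)                       # 20 trials, 5 time points
#   df = pd.DataFrame(dict(cond=np.repeat([0, 1], 10)))
#   scores, sub = nested_analysis(X, df, 'cond', n_jobs=1)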
def _default_analysis(X, y):
# from sklearn.metrics import roc_auc_score
from jr.stats import fast_mannwhitneyu
# Binary contrast
unique_y = np.unique(y)
# if two condition, can only return contrast
if len(y) == 2:
y = np.where(y == unique_y[0], 1, -1)
return np.mean(X * y[:, np.newaxis], axis=0)
elif len(unique_y) == 2:
# if two conditions but multiple trials, can return AUC
# auc = np.zeros_like(X[0])
_, _, auc = fast_mannwhitneyu(X[y == unique_y[0], ...],
X[y == unique_y[1], ...], n_jobs=1)
# for ii, x in enumerate(X.T):
# auc[ii] = roc_auc_score(y, np.copy(x))
return auc
# Linear regression:
elif len(unique_y) > 2:
return repeated_spearman(X, y)
else:
raise RuntimeError('Please specify a function for this kind of data')
def median_abs_deviation(x, axis=None):
"""median absolute deviation"""
x = np.asarray(x)
# transpose selected axis in front
shape = x.shape
n_dim = len(shape)
axis_ = None
if axis is not None:
dim_order = np.hstack((axis, np.delete(np.arange(n_dim), axis)))
x = np.transpose(x, dim_order)
axis_ = 0
# compute median
center = np.median(x, axis=axis_, keepdims=False)
    if np.ndim(center) > 0:
        center = center[np.newaxis, ...]
# compute median absolute deviation from median
mad = np.median(np.abs(x - center), axis=axis_)
return mad
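# Quick check: the median of [1, 2, 3, 4, 100] is 3 and the absolute
# deviations are [2, 1, 0, 1, 97], so the MAD is 1 -- unlike np.std, the
# estimate is robust to the outlier.
#   assert median_abs_deviation([1, 2, 3, 4, 100]) == 1.0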
def cross_correlation_fft(a, b, mode='valid'):
"""Cross correlation between two 1D signals. Similar to np.correlate, but
faster.
Parameters
----------
a : np.array, shape(n)
b : np.array, shape(m)
If len(b) > len(a), a, b = b, a
Output
------
r : np.array
Correlation coefficients. Shape depends on mode.
"""
from scipy import signal
a = np.asarray(a)
b = np.asarray(b)
if np.prod(a.ndim) > 1 or np.prod(b.ndim) > 1:
raise ValueError('Can only vectorize vectors')
if len(b) > len(a):
a, b = b, a
n = len(a)
# Pad vector
    c = np.hstack((np.zeros(n // 2), b, np.zeros(n // 2 + len(a) - len(b) + 1)))
# Convolution of reverse signal:
return signal.fftconvolve(c, a[::-1], mode=mode)
def align_signals(a, b):
"""Finds optimal delay to align two 1D signals
maximizes hstack((zeros(shift), b)) = a
Parameters
----------
a : np.array, shape(n)
b : np.array, shape(m)
Output
------
shift : int
Integer that maximizes hstack((zeros(shift), b)) - a = 0
"""
# check inputs
a = np.asarray(a)
b = np.asarray(b)
if np.prod(a.ndim) > 1 or np.prod(b.ndim) > 1:
raise ValueError('Can only vectorize vectors')
# longest first
sign = 1
if len(b) > len(a):
sign = -1
a, b = b, a
r = cross_correlation_fft(a, b)
    shift = np.argmax(r) - len(a) + len(a) // 2
# deal with odd / even lengths (b doubles in size by cross_correlation_fft)
if len(a) % 2 and len(b) % 2:
shift += 1
if len(a) > len(b) and len(a) % 2 and not(len(b) % 2):
shift += 1
return sign * shift
def cross_correlation(x, y, maxlag):
"""
Cross correlation with a maximum number of lags.
`x` and `y` must be one-dimensional numpy arrays with the same length.
This computes the same result as
        numpy.correlate(x, y, mode='full')[len(x)-maxlag-1:len(x)+maxlag]
    The return value has length 2*maxlag + 1.
Author: http://stackoverflow.com/questions/30677241
<NAME>
"""
from numpy.lib.stride_tricks import as_strided
def _check_arg(x, xname):
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('%s must be one-dimensional.' % xname)
return x
x = _check_arg(x, 'x')
y = _check_arg(y, 'y')
py = np.pad(y.conj(), 2*maxlag, mode='constant')
T = as_strided(py[2*maxlag:], shape=(2*maxlag+1, len(y) + 2*maxlag),
strides=(-py.strides[0], py.strides[0]))
px = np.pad(x, maxlag, mode='constant')
return T.dot(px)
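# Sanity sketch: the result should match the documented slice of the full
# correlation (x and y must have the same length).
#   x, y = np.random.randn(64), np.random.randn(64)
#   ref = np.correlate(x, y, mode='full')[len(x) - 3 - 1:len(x) + 3]
#   assert np.allclose(cross_correlation(x, y, maxlag=3), ref)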
class ScoringAUC():
"""Score AUC for multiclass problems.
Average of one against all.
"""
def __call__(self, clf, X, y, **kwargs):
from sklearn.metrics import roc_auc_score
# Generate predictions
if hasattr(clf, 'decision_function'):
y_pred = clf.decision_function(X)
elif hasattr(clf, 'predict_proba'):
y_pred = clf.predict_proba(X)
else:
y_pred = clf.predict(X)
# score
classes = set(y)
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
_score = list()
for ii, this_class in enumerate(classes):
_score.append(roc_auc_score(y == this_class,
y_pred[:, ii]))
if (ii == 0) and (len(classes) == 2):
_score[0] = 1. - _score[0]
break
return np.mean(_score, axis=0)
if __name__ == '__main__':
from sklearn.model_selection import cross_val_score, KFold
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.preprocessing import LabelBinarizer
x = np.random.randn(100, 10)
    y = np.random.randint(0, 2, 100)
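    # Hedged completion of the truncated demo: the exact original benchmark is
    # unknown, but ScoringAUC plugs directly into cross_val_score as a scorer.
    cv = KFold(3)
    for clf in (LogisticRegression(), RidgeClassifier(), LinearSVC()):
        scores = cross_val_score(clf, x, y, cv=cv, scoring=ScoringAUC())
        print(clf.__class__.__name__, np.mean(scores))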
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy
import rospy
import tf.transformations as trans
from PID import PIDRegulator
from dynamic_reconfigure.server import Server
from uuv_control_cascaded_pid.cfg import PositionControlConfig
import geometry_msgs.msg as geometry_msgs
from nav_msgs.msg import Odometry
from rospy.numpy_msg import numpy_msg
class PositionControllerNode:
def __init__(self):
print('PositionControllerNode: initializing node')
self.config = {}
self.pos_des = numpy.zeros(3)
self.quat_des = numpy.array([0, 0, 0, 1])
self.initialized = False
# Initialize pids with default parameters
self.pid_rot = PIDRegulator(1, 0, 0, 1)
self.pid_pos = PIDRegulator(1, 0, 0, 1)
# ROS infrastructure
self.sub_cmd_pose = rospy.Subscriber('cmd_pose', numpy_msg(geometry_msgs.Pose), self.cmd_pose_callback)
self.sub_odometry = rospy.Subscriber('odom', numpy_msg(Odometry), self.odometry_callback)
self.pub_cmd_vel = rospy.Publisher('cmd_vel', geometry_msgs.Twist, queue_size=10)
self.srv_reconfigure = Server(PositionControlConfig, self.config_callback)
def cmd_pose_callback(self, msg):
"""Handle updated set pose callback."""
# Just store the desired pose. The actual control runs on odometry callbacks
p = msg.position
q = msg.orientation
        self.pos_des = numpy.array([p.x, p.y, p.z])
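        # Completing the truncated callback (quaternion layout assumed to
        # match the (x, y, z, w) convention of self.quat_des above).
        self.quat_des = numpy.array([q.x, q.y, q.z, q.w])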
# coding: utf-8
#
# Copyright © 2016 <NAME>
import time
from settings import *
from gui import mapgui, updategui
import pygame
import random
import numpy as np
import math
def engine(ind, map):
screen = mapgui(ind_data)
running = True
# Simulation loop
while running:
start_time = time.time()
# Fresh white screen
screen.fill((252, 251, 251))
for key, i in ind.items():
# Boids in seeing range
close_inds = i.get_close_inds(setting_data.get_seeing_range())
# Too close boids
too_close_inds = i.get_close_inds(setting_data.get_personal_space())
# Get center point of individuals close by
center = i.get_center(close_inds)
# Get center point of too close individuals
center_to_avoid = i.get_center(too_close_inds)
# Get angles to the center points
angle = i.get_angle(center)
angle_to_avoid = i.get_angle(center_to_avoid)
# Update boid orientation
i.set_orientation(angle, angle_to_avoid + 180, close_inds)
# Move boid
i.move()
# Update individual position in simulation screen
updategui(ind_data[key], screen)
# Render
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
end_time = time.time()
# FPS limit
sleep_time = (1 / FRAMES_PER_SECOND) - (end_time - start_time)
if sleep_time > 0:
time.sleep(sleep_time)
def get_random_position():
posx = int(MAP_WIDTH * random.random())
posy = int(MAP_HEIGHT * random.random())
coordinates = (posx, posy)
return coordinates
def get_random_velocity():
return MIN_VELOCITY + (MAX_VELOCITY-MIN_VELOCITY) * random.random()
def get_random_orientation():
orientation = 360 * random.random()
return orientation
def turn_to(target_angle, own_angle):
a = - target_angle + own_angle
a = (a + 180) % 360 - 180
if a < 0:
own_angle = (own_angle + TURNING_SPEED) % 360
elif a > 0:
own_angle = own_angle - TURNING_SPEED
if own_angle < 0:
own_angle += 360
return own_angle
def get_circular_mean(array_of_angles, id=None):
ang1 = np.sum(np.cos(array_of_angles * np.pi / 180))
ang2 = np.sum(np.sin(array_of_angles * np.pi / 180))
ori_mean = np.arctan2(ang2, ang1) * 180 / np.pi
if ori_mean == 0 and id is not None:
return ind_data[id][ORIE]
if ori_mean < 0:
ori_mean += 360
return ori_mean
def get_circular_mean2(angle_to_center, angle_mean_ori, angle_to_avoid):
x = y = 0.
angles = [angle_to_center, angle_mean_ori, angle_to_avoid]
weights = [setting_data.get_cohesion(), setting_data.get_alignment(), setting_data.get_separation()]
for angle, weight in zip(angles, weights):
x += math.cos(math.radians(angle)) * weight
y += math.sin(math.radians(angle)) * weight
mean = math.degrees(math.atan2(y, x))
if mean == 0:
mean = None
return mean
def get_final_angle(angle_own, angle_to_center, angle_to_avoid, angle_mean_ori):
if np.isnan(angle_to_center):
angle_to_center = angle_own
    if np.isnan(angle_to_avoid):
        angle_to_avoid = angle_own  # assumed fallback, mirroring angle_to_center above
"""
This module provides a name_to_constructor dict for all models/estimators in scikit-learn, plus a couple test models and
error handling functions
"""
import warnings
import inspect
import sklearn.base
import sklearn.utils.testing
import joblib
import numpy as np
import os
# Sometimes xgboost is hard to install so make it optional
try:
import xgboost as xgb
except:
pass
import keras
from keras.models import model_from_json
from keras.models import load_model
from keras.models import Sequential
import random
random.seed(0)
import pandas as pd
#from . import keras_models
from mastml import utils
import pickle
from scipy import stats
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
name_to_constructor = dict(sklearn.utils.testing.all_estimators())
class AlwaysFive(sklearn.base.RegressorMixin):
"""
Class used as a test model that always predicts a value of 5.
Args:
constant: (int), the value to predict. Always 5 by default
Methods:
fit: Just passes through to maintain scikit-learn structure
predict: Provides predicted model values based on X features
Args:
X: (numpy array), array of X features
Returns:
(numpy array), prediction array where all values are equal to constant
"""
def __init__(self, constant = 5):
self.five = constant
def fit(self, X, y, groups=None):
return self
def predict(self, X):
return np.array([self.five for _ in range(len(X))])
class RandomGuesser(sklearn.base.RegressorMixin):
"""
Class used as a test model that always predicts random values for y data.
Args:
None
Methods:
fit: Constructs possible predicted values based on y data
Args:
y: (numpy array), array of y data
predict: Provides predicted model values based on X features
Args:
X: (numpy array), array of X features
Returns:
(numpy array), prediction array where all values are random selections of y data
"""
def __init__(self):
pass
def fit(self, X, y, groups=None):
self.possible_answers = y
return self
def predict(self, X):
return np.random.choice(self.possible_answers, size=X.shape[0])
class KerasRegressor():
def __init__(self, conf_dict):
self.conf_dict = conf_dict
self.model = self.build_model()
def build_model(self):
model_vals = self.conf_dict
model = Sequential()
for layer_dict, layer_val in model_vals.items():
if (layer_dict != 'FitParams'):
layer_type = layer_val.get('layer_type')
layer_name_asstr = layer_type
if layer_name_asstr == 'Dense':
neuron_num = int(layer_val.get('neuron_num'))
if (layer_dict == 'Layer1'):
input_dim = int(layer_val.get('input_dim'))
kernel_initializer = layer_val.get('kernel_initializer')
activation = layer_val.get('activation')
elif layer_name_asstr == 'Dropout':
rate = float(layer_val.get('rate'))
                layer_type = getattr(keras.layers, layer_name_asstr)
else:
if layer_val.get('rate'):
self.rate = float(layer_val.get('rate'))
if layer_val.get('epochs'):
self.epochs = int(layer_val.get('epochs'))
else:
self.epochs = 1
if layer_val.get('batch_size'):
self.batch_size = int(layer_val.get('batch_size'))
else:
self.batch_size = None
if layer_val.get('loss'):
self.loss = str(layer_val.get('loss'))
else:
self.loss = 'mean_squared_error'
if layer_val.get('optimizer'):
self.optimizer = str(layer_val.get('optimizer'))
else:
self.optimizer = 'adam'
if layer_val.get('metrics'):
self.metrics = layer_val.get('metrics').split(',')
else:
self.metrics = ['mae']
if layer_val.get('verbose'):
self.verbose = str(layer_val.get('verbose'))
else:
self.verbose = 0
if layer_val.get('shuffle'):
self.shuffle = bool(layer_val.get('shuffle'))
else:
self.shuffle = True
if layer_val.get('validation_split'):
self.validation_split = float(layer_val.get('validation_split'))
else:
self.validation_split = 0.0
continue
if (layer_dict == 'Layer1'):
model.add(layer_type(neuron_num, input_dim=input_dim, kernel_initializer=kernel_initializer,
activation=activation))
else:
if layer_name_asstr == 'Dense':
model.add(layer_type(neuron_num, kernel_initializer=kernel_initializer, activation=activation))
if layer_name_asstr == 'Dropout':
model.add(layer_type(rate=rate))
return model
def fit(self, X, Y):
# Need to rebuild and re-compile model at every fit instance so don't have information of weights from other fits
self.model = self.build_model()
self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
return self.model.fit(X, Y, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose,
validation_split=self.validation_split, shuffle=self.shuffle)
def predict(self, X):
return self.model.predict(X)
def summary(self):
return self.model.summary()
# ref: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingRegressor.html#sklearn.ensemble.BaggingRegressor
# NOTE: in order to use this, other models for the custom ensemble must be defined
# in the conf file with "_ensemble" somewhere in the name
class EnsembleRegressor():
def __init__(self, num_samples, model_list, num_models):
self.model_list = model_list # should be list of strings
self.num_models = num_models # how many of each of the specified models should be included in the ensemble
self.n_estimators = sum(self.num_models)
self.num_samples = num_samples
self.max_samples = num_samples
self.bootstrapped_datasets = []
self.bootstrapped_idxs = []
self.all_preds = []
self.path = ""
self.model = self.build_models() # actually a list of models for use as the members in the ensemble
self.fold = -1
self.bootstrap = True
def build_models(self):
model = []
for i, num_m in enumerate(self.num_models):
for j in range(num_m):
model.append(self.model_list[i])
return model
def setup(self, path):
self.fold += 1
self.bootstrapped_idxs = []
self.bootstrapped_datasets = []
self.path = path
def fit(self, X, Y):
X = X.values
Y = Y.values
idxs = np.arange(len(X))
# fit each model in the ensemble
for i in range(self.n_estimators):
model = self.model[i]
# do bootstrapping given the validation data
bootstrap_idxs = random.choices(idxs, k=self.num_samples)
bootstrap_X = X[bootstrap_idxs]
bootstrap_Y = Y[bootstrap_idxs]
if 1 == len(bootstrap_X.shape):
bootstrap_X = np.expand_dims(np.asarray(bootstrap_X), -1)
if 1 == len(bootstrap_Y.shape):
bootstrap_Y = np.expand_dims(np.asarray(bootstrap_Y), -1)
self.bootstrapped_idxs.append(bootstrap_idxs)
self.bootstrapped_datasets.append(bootstrap_X)
model.fit(bootstrap_X, bootstrap_Y)
def predict(self, X, return_std=False):
if isinstance(X, pd.DataFrame):
X = X.values
all_preds = []
means = []
for x_i in range(len(X)):
preds = []
for i in range(self.n_estimators):
sample_X = X[x_i]
if 1 == len(sample_X.shape):
sample_X = np.expand_dims(np.asarray(sample_X), 0)
preds.append(self.model[i].predict(sample_X))
all_preds.append(preds)
means.append(np.mean(preds))
# NOTE if manual implementation is desired
# https://www.jpytr.com/post/random_forests_and_jackknife_variance/
# https://github.com/scikit-learn-contrib/forest-confidence-interval/tree/master/forestci
# http://contrib.scikit-learn.org/forest-confidence-interval/reference/forestci.html
self.all_preds = all_preds
return np.asarray(means)
# check for failed fits, warn users, and re-calculate
def stats_check_models(self, X, Y):
if self.n_estimators > 10:
maes = []
for i in range(self.n_estimators):
                abs_errors = np.absolute(np.squeeze(np.asarray(self.all_preds)[:, i]) - Y)
maes.append(sum(abs_errors) / len(abs_errors))
alpha = 0.01
bad_idxs = []
for i in range(self.n_estimators):
other_maes = np.delete(maes, [i])
# ref: https://towardsdatascience.com/statistical-significance-hypothesis-testing-the-normal-curve-and-p-values-93274fa32687
z_score = (maes[i] - np.mean(other_maes)) / np.std(other_maes)
# ref: https://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in/3508321
p_val = stats.norm.sf(abs(z_score))*2
if p_val <= alpha:
# TODO ok to print these/how to print/log properly?
print("Estimator {} failed under statistical significance threshold {} (p_val {}), relevant dataset output to file with name format \'<fold>_<estimator idx>_bootstrapped_dataset.csv\'".format(i, alpha, p_val))
print("bad estimator mae: {}".format(maes[i]))
print("mean mae (for ref):")
                    print(np.mean(maes))
# AUTOGENERATED! DO NOT EDIT! File to edit: game.ipynb (unless otherwise specified).
__all__ = ['create_maze', 'shame_game', 'tough_game', 'random_move', 'available_moves']
# Cell
import numpy as np
# Cell
def create_maze(n, k=4, t=4, prng=None):
"""Create a maze, by diffusing out from k points.
Params
------
n : int
Board size
k : int
The number of starting points
t : int
The number of time steps for diffusion
    prng : None, np.random.RandomState
Controls seeding
"""
if prng is None:
prng = np.random.RandomState()
maze = np.zeros((n, n))
    # Initial seeds
locs = []
for _ in range(k):
x0, y0 = prng.randint(0, n, 2)
locs.append((x0, y0))
# Moving t steps from x0,y0
# fill in neighbors
# by random draw
for x0, y0 in locs:
x, y = x0, y0
maze[x, y] = 1
for step in range(t):
# Draw
dx = prng.randint(-1, 2)
if np.isclose(dx, 0):
dy = prng.randint(-1, 2)
else:
dy = 0
# Sane and safe moves?
if x + dx < 0:
dx = 0
if x + dx >= n:
dx = 0
if y + dy < 0:
dy = 0
if y + dy >= n:
dy = 0
# Add to maze
x += dx
y += dy
maze[x, y] = 1
return maze, prng
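# Example sketch: a reproducible 10x10 maze grown from 4 diffusion points.
#   maze, _ = create_maze(10, k=4, t=4, prng=np.random.RandomState(0))
#   print(maze.astype(int))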
# Cell
def shame_game(n, sigma=0.5, shame=1, maze=None, prng=None):
if prng is None:
        prng = np.random.RandomState()
import numpy as np
EPS = 1e-3
def xy2mrx_v0(a_xy, b_pv, grid):
"""
Encode xy to activations on a grid
Activation symmetric so that flipping xy about
b's v yields same values
Clip activations that would be outside grid to map to last coordinate
:param a_xy: n, 2 | (x_x, x_y)
:param b_pv: n, 4 | (x_x, x_y, v_x, v_y)
:param grid: (l, m) | symmetric (about 0) grid in 1D
l | upper bound for coordinates
m | number of grid points
grid points are regularly spaced in [0, l]
:return: n, m | encoding for each (of n) agent
"""
n = a_xy.shape[0]
m = int(grid[-1])
    mrx = np.zeros((n, m), dtype=float)
pos_dif = a_xy - b_pv[:, :2] # n, 2
pos_dif_dot_v = np.einsum('ij, ij -> i', pos_dif, b_pv[:, 2:]) # n
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
perp_ds = (pos_dif ** 2).sum(axis=1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
np.clip(perp_ds, a_min=None, a_max=grid[0]-EPS, out=perp_ds)
d = grid[0] / (grid[1] - 1)
for i in range(n):
a, r = np.divmod(perp_ds[i], d)
th = 1 - r/d
        inds = np.array([a, a + 1], dtype=int)
mrx[i, inds] = np.array([th, 1-th])
return mrx
def xy2mrx_v1(a_xy, b_pv, grid):
"""
Encode xy to activations on a grid
Activation symmetric so that flipping xy about
b's v yields same values
Clip activations that would be outside grid to map to last coordinate
:param a_xy: n, 2 | (x_x, x_y)
:param b_pv: n, 4 | (x_x, x_y, v_x, v_y)
:param grid: (l, m) | symmetric (about 0) grid in 1D
l | upper bound for coordinates
m | number of grid points
grid points are regularly spaced in [0, l]
:return: n, m | encoding for each (of n) agent
"""
n = a_xy.shape[0]
m = int(grid[-1])
    mrx = np.zeros((n, m), dtype=float)
pos_dif = a_xy - b_pv[:, :2] # n, 2
pos_dif_dot_v = np.einsum('ij, ij -> i', pos_dif, b_pv[:, 2:]) # n
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
perp_ds = (pos_dif ** 2).sum(axis=1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
np.clip(perp_ds, a_min=None, a_max=grid[0]-EPS, out=perp_ds)
d = grid[0] / (grid[1] - 1)
a, r = np.divmod(perp_ds, d)
    a = a.astype(int)
th = 1 - r/d
row_inds = np.arange(n)
mrx[row_inds, a] = th
mrx[row_inds, a + 1] = 1 - th
return mrx
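# Sanity sketch: each row is a two-point linear interpolation (th and 1 - th),
# so the activations of every agent should sum to 1.
#   a_xy, b_pv = np.random.randn(8, 2), np.random.randn(8, 4)
#   mrx = xy2mrx_v1(a_xy, b_pv, np.array([5., 6]))
#   assert np.allclose(mrx.sum(axis=1), 1)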
def evaluate_v_v0(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n, | q=0 for using grid encoding, q=1 for not
:param b_inds: n, | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n, 2 | \hat{v} for each (of n) agent
"""
n = a_pv.shape[0]
v_hat = a_pv[:, 2:].copy()
d = grid[0] / (grid[1] - 1)
for i in range(n):
if q[i]:
continue
b_pv_i = b_pv[b_inds[i], :]
pos_dif = a_pv[i, :2] - b_pv_i[:2] # 2,
pos_dif_dot_v = pos_dif.dot(b_pv_i[2:])
v_normsq = (b_pv_i[2:] ** 2).sum(axis=-1)
v_normsq = EPS if v_normsq < EPS else v_normsq
perp_ds = (pos_dif ** 2).sum(axis=-1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
perp_ds = np.clip(perp_ds, a_min=None, a_max=grid[0] - EPS)
a, r = np.divmod(perp_ds, d)
        a = a.astype(int)
th = 1 - r / d
scaling = u[a] * th + u[a + 1] * (1 - th)
v_hat[i, :] *= scaling
return v_hat
def evaluate_v_v1(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n, | q=0 for using grid encoding, q=1 for not
:param b_inds: n, | index of b \in [0, k-1]
:return: n, 2 | \hat{v} for each (of n) agent
"""
n = a_pv.shape[0]
v_hat = a_pv[:, 2:].copy()
d = grid[0] / (grid[1] - 1)
# n->k pe dist
pos_dif = a_pv[:, np.newaxis, :2] - b_pv[np.newaxis, :, :2]
# n, k
pos_dif_dot_v = np.einsum('ijk, jk -> ij', pos_dif, b_pv[:, 2:])
# k
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
# n, k
pe_dist_sq = (pos_dif ** 2).sum(axis=-1) - (pos_dif_dot_v ** 2) / v_normsq
pe_dist = np.sqrt(pe_dist_sq)
np.clip(pe_dist, a_min=None, a_max=grid[0] - EPS, out=pe_dist)
# subset n, st. q=0: n_q0,
q0_mask = q == 0
# n_q0,
a, r = np.divmod(pe_dist[q0_mask, b_inds[q0_mask]], d)
    a = a.astype(int)
th = 1 - r / d
scaling = u[a] * th + u[a + 1] * (1 - th)
v_hat[q0_mask, :] = (v_hat[q0_mask, :].T * scaling).T
return v_hat
def evaluate_v_particles_v0(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n_p, n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n_p, n | q=0 for using grid encoding, q=1 for not
:param b_inds: n_p, n | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n_p, n, 2 | \hat{v} for each (of n) agent
"""
n_p, n = a_pv.shape[:2]
v_hat = np.empty((n_p, n, 2))
for i in range(n_p):
v_hat[i, ...] = evaluate_v_v1(a_pv[i], b_pv, grid, u, q[i], b_inds[i])
return v_hat
def evaluate_v_particles_v1(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n_p, n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n_p, n | q=0 for using grid encoding, q=1 for not
:param b_inds: n_p, n | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n_p, n, 2 | \hat{v} for each (of n) agent
"""
n_p, n = a_pv.shape[:2]
v_hat = evaluate_v_v1(
a_pv.reshape(-1, 4), b_pv, grid, u, q.reshape(-1), b_inds.reshape(-1)).reshape(n_p, n, 2)
return v_hat
def main_evaluate_v_particles():
from timeit import timeit
seed = np.random.randint(0, 1000)
# seed = 0
np.random.seed(seed)
print('seed: {}'.format(seed))
n_p = 100
n = 20
k = 3
a_pv = np.random.randn(n_p, n, 4)
b_pv = np.random.randn(k, 4) * 2
grid = np.array([5., 6]) # [0, 1, ..., 5]
u = np.arange(grid[1]) / grid[1]
q = np.random.randn(n_p, n) > 0
b_inds = np.random.choice(k, n_p*n).reshape(n_p, n)
print('---------------')
x_true = evaluate_v_particles_v0(a_pv, b_pv, grid, u, q, b_inds)
x_hat = evaluate_v_particles_v1(a_pv, b_pv, grid, u, q, b_inds)
print('diff: {:0.4f}'.format(np.linalg.norm(x_true - x_hat)))
n_tries = 2
args = (a_pv, b_pv, grid, u, q, b_inds)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_particles_v0, args=args))/n_tries)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_particles_v1, args=args))/n_tries)
def main_evaluate_v():
from timeit import timeit
seed = np.random.randint(0, 1000)
# seed = 0
np.random.seed(seed)
print('seed: {}'.format(seed))
n = 200
k = 3
a_pv = np.random.randn(n, 4)
b_pv = np.random.randn(k, 4) * 2
grid = np.array([5., 6]) # [0, 1, ..., 5]
    u = np.arange(grid[1]) / grid[1]
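    # Hedged completion (the original tail is truncated); this mirrors the
    # comparison done in main_evaluate_v_particles above.
    q = np.random.randn(n) > 0
    b_inds = np.random.choice(k, n)
    x_true = evaluate_v_v0(a_pv, b_pv, grid, u, q, b_inds)
    x_hat = evaluate_v_v1(a_pv, b_pv, grid, u, q, b_inds)
    print('diff: {:0.4f}'.format(np.linalg.norm(x_true - x_hat)))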
from abc import ABC
import numpy
from ._helpers import resample
algorithms = {"dunkin", "fast-delta"}
def is_arraylike(arr, size):
"""Check input array."""
    return isinstance(arr, (list, tuple, numpy.ndarray)) and numpy.size(arr) == size
# coding: utf-8
# In[ ]:
from enum import Enum
import os
TRAIN_DIR = os.path.join('../stage_1_train_images', '')
TRAIN_DIR_STAGE_2 = os.path.join('../stage_2_train_images', '')
TEST_DIR = os.path.join('../stage_1_test_images', '')
TEST_DIR_STAGE_2 = os.path.join('../stage_2_test_images', '')
CSV_FILENAME = 'submission.csv'
class HemorrhageTypes(Enum):
ANY = "any"
EP = "epidural"
IN_PA = "intraparenchymal"
IN_VE = "intraventricular"
SUB_AR = "subarachnoid"
SUB_DU = "subdural"
# There are at least 5 windows that a radiologist goes through for each scan!
# Brain Matter window : W:80 L:40
# Blood/subdural window: W:130-300 L:50-100
# Soft tissue window: W:350–400 L:20–60
# Bone window: W:2800 L:600
# Grey-white differentiation window: W:8 L:32 or W:40 L:40
BRAIN_MATTER_WINDOW = (40, 80)
SUBDURAL_WINDOW = (80, 200)
SOFT_TISSUE_WINDOW = (40, 380)
BONE_WINDOW = (600, 2800)
GRAY_WHITE_DIFFERENTIATION_WINDOW = (40, 40)
ALL_WINDOW_VALUES = {'BRAIN_MATTER': BRAIN_MATTER_WINDOW,
'SUBDURAL': SUBDURAL_WINDOW,
'SOFT_TISSUE': SOFT_TISSUE_WINDOW,
'BONE': BONE_WINDOW,
'GRAY_WHITE': GRAY_WHITE_DIFFERENTIATION_WINDOW}
KERNEL_WIDTH = 13
KERNEL_HEIGHT = 13
GAUSS_MEAN = 0.1
GAUSS_STDDEV = 0.05
BRIGHTNESS_DELTA = 0.4
# In[ ]:
def create_output_csv(output_dict):
content = "ID,Label\n"
for image_id in output_dict:
for num, hemorrhageType in enumerate(HemorrhageTypes, start=0):
content += create_output_line(image_id, hemorrhageType.value, output_dict[image_id][num])
with open(CSV_FILENAME, "w") as f:
f.write(content)
def create_output_line(image_id, hemorrhage_type, probability):
return image_id + "_" + hemorrhage_type + "," + str(probability) + "\n"
# In[ ]:
import os
import pandas as pd
import glob
import pydicom
import numpy as np
def get_sequence_clipping_order(seq_length):
indices = []
elem = 0
for idx, i in enumerate(reversed(range(seq_length))):
indices.append(elem)
if idx % 2 == 0:
elem += i
else:
elem -= i
return indices
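# Example: get_sequence_clipping_order(5) returns [0, 4, 1, 3, 2], i.e. the
# indices alternate between the start and the end of the sequence, so deleting
# the first `diff` entries clips the outermost slices and keeps central ones.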
def print_error(message):
c_red = '\033[95m'
c_end = '\033[0m'
print(c_red + message + c_end)
def get_csv_train(data_prefix=TRAIN_DIR_STAGE_2):
train_df = pd.read_csv(os.path.join(data_prefix, 'stage_2_train.csv'))
train_df[['ID', 'subtype']] = train_df['ID'].str.rsplit('_', 1,
expand=True)
train_df = train_df.rename(columns={'ID': 'id', 'Label': 'label'})
train_df = pd.pivot_table(train_df, index='id',
columns='subtype', values='label')
train_df.to_csv("labels_2.csv")
return train_df
def extract_csv_partition():
df = get_csv_train()
meta_data_train = combine_labels_metadata(TRAIN_DIR_STAGE_2)
negative, positive = df.loc[df['any'] == 0], df.loc[df['any'] == 1]
negative_study_uids = list(meta_data_train.query("any == 0")['StudyInstanceUID'])
indices = np.arange(min(len(negative_study_uids), len(positive.index)))
np.random.shuffle(indices)
negative_study_uids = np.array(negative_study_uids)[indices]
selected_negative_studies = meta_data_train.loc[meta_data_train['StudyInstanceUID'].isin(negative_study_uids)]
selected_negative_studies = selected_negative_studies.drop(
set(selected_negative_studies.columns).intersection(set(negative.columns)), axis=1)
negative = negative.merge(selected_negative_studies, how='left', on='id').dropna()
negative = negative.drop(selected_negative_studies.columns, axis=1)
return pd.concat([positive, negative])
def extract_metadata(data_prefix=TRAIN_DIR_STAGE_2):
filenames = glob.glob(os.path.join(data_prefix, "*.dcm"))
get_id = lambda p: os.path.splitext(os.path.basename(p))[0]
ids = map(get_id, filenames)
dcms = map(pydicom.dcmread, filenames)
columns = ['BitsAllocated', 'BitsStored', 'Columns', 'HighBit',
'Modality', 'PatientID', 'PhotometricInterpretation',
'PixelRepresentation', 'RescaleIntercept', 'RescaleSlope',
'Rows', 'SOPInstanceUID', 'SamplesPerPixel', 'SeriesInstanceUID',
'StudyID', 'StudyInstanceUID', 'ImagePositionPatient',
'ImageOrientationPatient', 'PixelSpacing']
meta_dict = {col: [] for col in columns}
for img in dcms:
for col in columns:
meta_dict[col].append(getattr(img, col))
meta_df = pd.DataFrame(meta_dict)
del meta_dict
meta_df['id'] = pd.Series(ids, index=meta_df.index)
split_cols = ['ImagePositionPatient1', 'ImagePositionPatient2',
'ImagePositionPatient3', 'ImageOrientationPatient1',
'ImageOrientationPatient2', 'ImageOrientationPatient3',
'ImageOrientationPatient4', 'ImageOrientationPatient5',
'ImageOrientationPatient6', 'PixelSpacing1',
'PixelSpacing2']
meta_df[split_cols[:3]] = pd.DataFrame(meta_df.ImagePositionPatient.values.tolist())
meta_df[split_cols[3:9]] = pd.DataFrame(meta_df.ImageOrientationPatient.values.tolist())
meta_df[split_cols[9:]] = pd.DataFrame(meta_df.PixelSpacing.values.tolist())
meta_df = meta_df.drop(['ImagePositionPatient', 'ImageOrientationPatient', 'PixelSpacing'], axis=1)
meta_df.to_csv(os.path.join(data_prefix, 'test_meta_2.csv'))
return meta_df
def combine_labels_metadata(data_prefix=TRAIN_DIR_STAGE_2):
meta_df = extract_metadata(data_prefix)
df = get_csv_train(data_prefix)
df = df.merge(meta_df, how='left', on='id').dropna()
df.sort_values(by='ImagePositionPatient3', inplace=True, ascending=False)
df.to_csv(os.path.join(data_prefix, 'train_meta_2.csv'))
return df
# In[ ]:
import cv2
# the kernel sizes must be positive odd integers but they do not have to be equal
# the larger they are the more the image will be blurred
def blur_image(pixel_matrix, kernel_size_width=KERNEL_WIDTH, kernel_size_height=KERNEL_HEIGHT):
return cv2.GaussianBlur(pixel_matrix, (kernel_size_width, kernel_size_height), cv2.BORDER_DEFAULT)
def noisy(image, mean=GAUSS_MEAN, stddev=GAUSS_STDDEV):
gauss = np.random.normal(mean, stddev, image.shape)
noisy = image + gauss
noisy_min = np.amin(noisy)
noisy_max = np.amax(noisy)
noisy = (noisy - noisy_min) / (noisy_max - noisy_min)
return noisy
def adjust_brightness(image, delta=BRIGHTNESS_DELTA):
image += delta
image[image < 0] = 0
image[image > 1] = 1
return image
# In[ ]:
import copy
import pydicom
import scipy
from skimage import morphology
from skimage.transform import resize
class Preprocessor:
@staticmethod
def apply_hounsfield(image, intercept, slope):
        if slope != 1:
image = slope * image.astype(np.float64)
image = image.astype(np.float64)
image += np.float64(intercept)
# Setting values smaller than air, to air. Values smaller than -1024, are probably just outside the scanner.
image[image < -1024] = -1024
return image
@staticmethod
def windowing(image, custom_center=30, custom_width=100, rescale=True):
new_image = copy.deepcopy(image)
min_value = custom_center - (custom_width / 2)
max_value = custom_center + (custom_width / 2)
# Including another value for values way outside the range, to (hopefully) make segmentation processes easier.
new_image[new_image < min_value] = min_value
new_image[new_image > max_value] = max_value
if rescale:
new_image = (new_image - min_value) / (max_value - min_value)
return new_image
@staticmethod
def image_resample(image, pixel_spacing, new_spacing=[1, 1]):
pixel_spacing = map(float, pixel_spacing)
spacing = np.array(list(pixel_spacing))
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor)
return image
@staticmethod
def image_background_segmentation(image, WL=30, WW=100, rescale=True):
        # window bounds from the level (WL) and width (WW), matching the
        # center/width convention used in Preprocessor.windowing above
        lB = WL - WW / 2
        uB = WL + WW / 2
# Keep only values inside of the window
background_separation = np.logical_and(image > lB, image < uB)
# Get largest connected component:
# From https://github.com/nilearn/nilearn/blob/master/nilearn/_utils/ndimage.py
background_separation = morphology.dilation(background_separation, np.ones((5, 5)))
labels, label_nb = scipy.ndimage.label(background_separation)
        label_count = np.bincount(labels.ravel().astype(int))
# discard the 0 label
label_count[0] = 0
mask = labels == label_count.argmax()
# Fill holes in the mask
        mask = morphology.dilation(mask, np.ones((5, 5)))  # dilate the mask for less fuzzy edges
mask = scipy.ndimage.morphology.binary_fill_holes(mask)
mask = morphology.dilation(mask, np.ones((3, 3))) # dilate the mask again
image = mask * image
if rescale:
img_min = np.amin(image)
img_max = np.amax(image)
image = (image - img_min) / (img_max - img_min)
return image
@staticmethod
def preprocess(image_path):
dicom = pydicom.read_file(image_path)
image = dicom.pixel_array.astype(np.float64)
if image.shape != (299, 299):
image = resize(image, (299, 299))
p = Preprocessor
image = p.apply_hounsfield(image, dicom.RescaleIntercept, dicom.RescaleSlope)
image = p.windowing(image)
return image
@staticmethod
def augment(image):
augmented = list()
augmented.append(blur_image(image))
augmented.append(noisy(image))
augmented.append(adjust_brightness(image, 0.3))
return augmented
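# Usage sketch (the file name is borrowed from the demo further below; adjust
# the directory to your own data layout):
#   img = Preprocessor.preprocess(os.path.join(TRAIN_DIR, 'ID_00025ef4b.dcm'))
#   variants = Preprocessor.augment(img)  # blurred / noisy / brightened copies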
# In[ ]:
import os
import random
from keras.utils import Sequence
class DataGenerator(Sequence):
def __init__(self, list_ids, labels=None, batch_size=1, img_size=(299, 299, 3),
img_dir=TRAIN_DIR_STAGE_2, shuffle=True, n_classes=2):
self.list_ids = list_ids
self.indices = np.arange(len(self.list_ids))
self.labels = labels
self.batch_size = batch_size
self.img_size = img_size
self.img_dir = img_dir
self.shuffle = shuffle
self.n_classes = n_classes
# TODO: this could be generalized with the help of
# an Augmenter class
self.n_augment = 3 # 3 data augmentation functions
self.augment_funcs = [blur_image,
noisy,
adjust_brightness,
lambda img: img] # identity function
self.on_epoch_end()
if labels is not None:
# Weights should be a probability distribution.
# If the number of training instances is too large,
# there could be issues! (arithmetic underflow)
weight_func = lambda row: 1.0 if row["any"] == 0 else self.n_augment + 1
self.weights = labels.apply(weight_func, axis=1)
total = self.weights.sum()
self.weights = (self.weights / total).values
# set random seed, hope this randomizes starting value for
# each worker
random.seed(os.urandom(8))
def __len__(self):
return len(self.indices) // self.batch_size
def __getitem__(self, index):
        if self.labels is not None:
            indices = np.random.choice(self.indices, size=self.batch_size,
                                       replace=False, p=self.weights)
        else:
            # test phase: plain sequential batches (no sampling weights exist)
            indices = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
        return self.__data_generation(indices)
# Don't think this is necessary anymore, indices are sampled randomly.
def on_epoch_end(self):
pass
def __data_generation(self, indices):
x = np.empty((self.batch_size, *self.img_size))
if self.labels is not None: # training phase
if self.n_classes == 2:
y = np.empty((self.batch_size,), dtype=np.float32)
else:
y = np.empty((self.batch_size, self.n_classes), dtype=np.float32)
for i, idx in enumerate(indices):
image = Preprocessor.preprocess(self.img_dir + self.list_ids[idx] + ".dcm")
if self.labels.iloc[idx]['any'] == 1:
image = self.augment_funcs[random.randint(0, self.n_augment)](image)
image = np.array(image)
image = np.repeat(image[..., np.newaxis], 3, -1)
x[i,] = image
if self.n_classes == 2:
y[i, ] = self.labels.iloc[idx]['any']
elif self.n_classes == 5:
y[i, ] = self.labels.iloc[idx, 1:]
else:
y[i, ] = self.labels.iloc[idx]
return x, y
else: # test phase
for i, idx in enumerate(indices):
image = Preprocessor.preprocess(self.img_dir + self.list_ids[idx] + ".dcm")
image = np.repeat(image[..., np.newaxis], 3, -1)
x[i,] = image
return x
# In[ ]:
import os
from keras.utils import Sequence
class LSTMDataGenerator(Sequence):
def __init__(self, list_ids, labels=None, batch_size=1, img_size=(299, 299, 3),
sequence_size=10, img_dir=TRAIN_DIR_STAGE_2, shuffle=True):
# here, list_ids is a series of lists; each list represents an
# ordered sequence of scans that compose a single study
self.list_ids = list_ids
self.indices = np.arange(len(self.list_ids))
self.labels = labels
self.batch_size = batch_size
self.img_size = img_size
self.sequence_size = sequence_size
self.img_dir = img_dir
self.shuffle = shuffle
# TODO: this could be generalized with the help of
# an Augmenter class
self.n_augment = 3 # 3 data augmentation functions
self.augment_funcs = [blur_image,
noisy,
adjust_brightness,
lambda img: img] # identity function
self.on_epoch_end()
if labels is not None:
# Weights should be a probability distribution.
# If the number of training instances is too large,
# there could be issues! (arithmetic underflow)
weight_func = lambda seq: (float(self.n_augment + 1)
if any([labels[0] for labels in seq])
else 1.0)
self.weights = np.array(list(map(weight_func, self.labels)))
total = np.sum(self.weights)
self.weights = (self.weights / total)
def __len__(self):
return len(self.indices) // self.batch_size
def __getitem__(self, index):
        if self.labels is not None:
            indices = np.random.choice(self.indices, size=self.batch_size,
                                       replace=False, p=self.weights)
        else:
            # test phase: plain sequential batches (no sampling weights exist)
            indices = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
        return self.__data_generation(indices)
def on_epoch_end(self):
pass
def __data_generation(self, indices):
x = np.empty((self.batch_size, self.sequence_size, *self.img_size))
preprocess_func = lambda im: Preprocessor.preprocess(os.path.join(self.img_dir, im + ".dcm"))
if self.labels is not None: # training phase
y = np.empty((self.batch_size, self.sequence_size, 6), dtype=np.float32)
for i, idx in enumerate(indices):
seq = self.list_ids[idx]
seq_labels = np.array(self.labels[idx])
seq_len = len(seq)
# if there is an any label = 1, set has_hemorrhage flag
                has_hemorrhage = np.any(seq_labels[:, 0])
imgs = map(preprocess_func, seq)
if has_hemorrhage:
# augment images
func_idxs = np.random.randint(0, self.n_augment + 1,
size=seq_len)
imgs = [self.augment_funcs[j](img)
for j, img in zip(func_idxs, imgs)]
else:
# consume map generator
imgs = list(imgs)
imgs = np.array(imgs)
imgs = np.repeat(imgs[..., np.newaxis], 3, -1)
diff = seq_len - self.sequence_size
if diff < 0:
padding = np.repeat(np.zeros(imgs.shape[1:])[np.newaxis, ...], abs(diff), 0)
imgs = np.concatenate((imgs, padding), axis=0)
seq_padding = np.repeat(np.zeros(6)[np.newaxis, ...], abs(diff), 0)
seq_labels = np.concatenate((seq_labels, seq_padding), axis=0)
elif diff > 0:
indices = get_sequence_clipping_order(seq_len)
imgs = np.delete(imgs, indices[:diff], 0)
seq_labels = np.delete(seq_labels, indices[:diff], 0)
x[i,] = imgs
y[i,] = seq_labels[:, :]
return x, y
else: # test phase
for i, idx in enumerate(indices):
seq = self.list_ids[idx]
seq_len = len(seq)
imgs = np.array(list(map(preprocess_func, seq)))
imgs = np.repeat(imgs[..., np.newaxis], 3, -1)
diff = seq_len - self.sequence_size
if diff < 0:
padding = np.repeat(np.zeros(imgs.shape[1:])[np.newaxis, ...], abs(diff), 0)
imgs = np.concatenate((imgs, padding), axis=0)
elif diff > 0:
indices = get_sequence_clipping_order(seq_len)
imgs = np.delete(imgs, indices[:diff], 0)
x[i,] = imgs
return x
# In[ ]:
from keras.applications import NASNetLarge, InceptionResNetV2, Xception, DenseNet201, ResNet50
from keras.layers import Bidirectional, LSTM, TimeDistributed, Masking
from keras.layers import Dense, Dropout, BatchNormalization, LeakyReLU, Input, GlobalAveragePooling2D
from keras.models import Sequential, Model
class StandardModel:
def __init__(self, network='xception', input_shape=(299, 299, 3), pooling_method='max', classes=2, use_softmax=True):
self.base_model = self.get_base_model(network, input_shape, pooling_method)
self.classes = classes
self.use_softmax = use_softmax
self.input_shape = input_shape
@staticmethod
def get_base_model(network, input_shape, pooling_method):
network = network.lower()
input_warning_message = 'WARNING! The input shape is not the default one!!! Proceeding anyway!'
if network == 'nas':
if input_shape != (331, 331, 3):
print_error(input_warning_message)
return NASNetLarge(input_shape=input_shape, include_top=False, pooling=pooling_method,
weights=None)
elif network == 'inception':
if input_shape != (299, 299, 3):
print_error(input_warning_message)
return InceptionResNetV2(input_shape=input_shape, include_top=False, pooling=pooling_method,
weights='imagenet')
elif network == 'xception':
if input_shape != (299, 299, 3):
print_error(input_warning_message)
return Xception(input_shape=input_shape, include_top=False, pooling=pooling_method,
weights='imagenet')
elif network == 'densenet':
if input_shape != (224, 224, 3):
print_error(input_warning_message)
return DenseNet201(input_shape=input_shape, include_top=False, pooling=pooling_method,
weights='imagenet')
elif network == 'resnet':
if input_shape != (224, 224, 3):
print_error(input_warning_message)
return ResNet50(input_shape=input_shape, include_top=False, pooling=pooling_method,
weights='imagenet')
else:
            print_error(f'Invalid network name: {network}! Please choose from: nas, inception, xception, densenet, resnet')
return None
def build_model(self):
return self.build_binary_model() if self.classes == 2 else self.build_multi_class_model()
def build_binary_model(self):
model = Sequential()
model.add(self.base_model)
model.add(Dense(96))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
return model
def build_multi_class_model(self):
return self.build_probability_model() if self.use_softmax else self.build_recurrent_model()
def build_probability_model(self):
model = Sequential()
model.add(self.base_model)
model.add(Dense(96, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(self.classes, activation='sigmoid'))
return model
def build_recurrent_model(self):
inputs = Input(shape=(10, *self.input_shape))
time_dist = TimeDistributed(self.base_model)(inputs)
global_pool = TimeDistributed(GlobalAveragePooling2D())(time_dist)
dense_relu = TimeDistributed(Dense(256, activation='relu'))(global_pool)
masked = Masking(0.0)(dense_relu)
out = Bidirectional(LSTM(64, return_sequences=True, activation='softsign'))(masked)
out = TimeDistributed(Dense(self.classes, activation='sigmoid'))(out)
model = Model(inputs=inputs, outputs=out)
return model
def build_simple_recurrent_model(self):
inputs = Input(shape=(None, self.classes))
lstm = Bidirectional(LSTM(64, activation='softsign', return_sequences=True))(inputs)
outputs = TimeDistributed(Dense(self.classes, activation='sigmoid'))(lstm)
model = Model(inputs=inputs, outputs=outputs)
return model
# In[ ]:
import glob
import os
import sys
import time
import keras
import matplotlib.pyplot as plt
import tensorflow as tf
import keras.backend as K
from keras.optimizers import Adamax, SGD
from sklearn.metrics import log_loss
from keras.callbacks import ModelCheckpoint, Callback
class WeightsSaver(Callback):
def __init__(self, N):
self.N = N
self.batch = 0
def on_batch_end(self, batch, logs={}):
if self.batch % self.N == 0:
name = 'model_weights.h5'
self.model.save_weights(name)
self.batch += 1
def prepare_data(only_positives=False):
csv = pd.read_csv(os.path.join('data/train', 'labels_2.csv'))
files = glob.glob(os.path.join(TRAIN_DIR_STAGE_2, "*.dcm"))
files = list(map(lambda x: os.path.splitext(os.path.basename(x))[0], files))
filtered_csv = csv[csv.id.isin(files)]
if only_positives:
filtered_csv = filtered_csv.loc[filtered_csv['any'] == 1]
indices = np.random.rand(len(filtered_csv))
mask = indices < 0.9
x_train, y_train = list(filtered_csv[mask].id), filtered_csv.iloc[mask, 1:]
x_test, y_test = list(filtered_csv[~mask].id), filtered_csv.iloc[~mask, 1:]
# x_train.reset_index(inplace=True, drop=True)
y_train.reset_index(inplace=True, drop=True)
# x_test.reset_index(inplace=True, drop=True)
y_test.reset_index(inplace=True, drop=True)
return x_train, y_train, x_test, y_test
def prepare_sequential_data(only_positives=False, for_prediction=False):
if not for_prediction:
# open label + metadata CSV
csv = pd.read_csv(os.path.join('data/train', "train_meta_2.csv"))
# sort by study ID and position
csv.sort_values(by=["StudyInstanceUID", "ImagePositionPatient3"], inplace=True, ascending=False)
label_columns = ["any", "epidural", "intraparenchymal",
"intraventricular", "subarachnoid", "subdural"]
# filter unnecessary columns
csv = csv[["StudyInstanceUID", "id"] + label_columns]
if only_positives:
csv = csv.loc[csv['any'] == 1]
# get sequences of IDs (groupby preserves order)
sequences = csv.groupby("StudyInstanceUID")["id"].apply(list)
# group labels into one single column
csv["labels"] = csv[label_columns].values.tolist()
# get sequences of labels
labels = csv.groupby("StudyInstanceUID")["labels"].apply(list)
# indices = np.random.rand(sequences.size)
# # partition data
# mask = indices < 0.001
# x_train = list(sequences.iloc[mask])
# y_train = list(labels.iloc[mask])
x_train = list(sequences)
y_train = list(labels)
return x_train, y_train
else:
csv = pd.read_csv(os.path.join('data/train', "test_meta_2.csv"))
# sort by study ID and position
csv.sort_values(by=["StudyInstanceUID", "ImagePositionPatient3"], inplace=True, ascending=False)
# filter unnecessary columns
csv = csv[["StudyInstanceUID", "id"]]
# get sequences of IDs (groupby preserves order)
sequences = csv.groupby("StudyInstanceUID")["id"].apply(list)
x_test = list(sequences)
return x_test
def test_recurrent_network():
def generate_single_instance(instance):
images, labels = list(), list()
for file in instance:
file_path = os.path.join(TRAIN_DIR, file)
images.append(Preprocessor.preprocess(file_path))
labels.append(np.random.uniform(0, 1, 5))
images = np.stack(images, axis=0)
labels = np.stack(labels, axis=0)
return images, labels
model = StandardModel('xception', (512, 512, 3), classes=5, use_softmax=False, pooling_method=None)
model = model.build_model()
model.compile(Adamax(), loss='categorical_crossentropy', metrics=['acc'])
model.summary()
x_train = []
y_train = []
data = [['ID_00025ef4b.dcm', 'ID_00027c277.dcm', 'ID_00027cbb1.dcm', 'ID_00027c277.dcm', 'ID_00027cbb1.dcm',
'ID_00027c277.dcm', 'ID_00027cbb1.dcm', 'ID_00027c277.dcm', 'ID_00027cbb1.dcm', 'ID_00027cbb1.dcm']]
for i in range(1):
instance_images, instance_labels = generate_single_instance(data[i])
x_train.append(instance_images)
y_train.append(instance_labels)
x_train = np.stack(x_train)
x_train = np.repeat(x_train[..., np.newaxis], 3, -1)
y_train = np.stack(y_train)
print(x_train.shape, y_train.shape)
model.fit(x_train, y_train, batch_size=1)
def plot_model_graph(history, graph_name):
plt.plot(history.history['acc'])
plt.plot(history.history['loss'])
plt.title('Model Accuracy & Loss')
plt.ylabel('Accuracy & Loss')
plt.xlabel('Epoch')
plt.savefig(graph_name)
def weighted_log_loss(y_true, y_pred):
class_weights = np.array([2., 1., 1., 1., 1., 1.])
eps = K.epsilon()
y_pred = K.clip(y_pred, eps, 1.0 - eps)
out = -(y_true * K.log(y_pred) * class_weights +
(1.0 - y_true) * K.log(1.0 - y_pred) * class_weights)
return K.mean(out, axis=-1)
def train_binary_model(base_model, model_name, already_trained_model=None):
x_train, y_train, x_test, y_test = prepare_data()
if not already_trained_model:
model = StandardModel(base_model, (512, 512, 3), classes=2, use_softmax=True)
model = model.build_model()
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(DataGenerator(x_train, labels=y_train, n_classes=2, batch_size=8), epochs=1)
model.save(model_name)
else:
if os.path.exists(already_trained_model):
model = keras.models.load_model(already_trained_model)
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(DataGenerator(x_train, labels=y_train, n_classes=2, batch_size=8), epochs=1)
model.save(model_name)
else:
print_error("Provided model file doesn't exist! Exiting...")
sys.exit(1)
def train_multi_class_model(base_model, model_name, already_trained_model=None, n_classes=5):
x_train, y_train, x_test, y_test = prepare_data()
checkpoint_acc = keras.callbacks.ModelCheckpoint(model_name, monitor='loss', verbose=0,
save_best_only=False, save_weights_only=False, mode='auto', period=1)
if not already_trained_model:
model = StandardModel(base_model, (299, 299, 3), classes=n_classes, use_softmax=True)
model = model.build_model()
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(DataGenerator(x_train, labels=y_train, n_classes=n_classes, batch_size=8), epochs=5, callbacks=[checkpoint_acc])
model.save(model_name)
else:
if os.path.exists(already_trained_model):
model = keras.models.load_model(already_trained_model)
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(DataGenerator(x_train, labels=y_train, n_classes=n_classes, batch_size=8), epochs=3, callbacks=[checkpoint_acc])
model.save(model_name)
else:
print_error("Provided model file doesn't exist! Exiting...")
sys.exit(1)
def train_recurrent_multi_class_model(base_model, model_name, already_trained_model=None):
x_train, y_train = prepare_sequential_data()
if not already_trained_model:
model = StandardModel(base_model, (299, 299, 3), classes=6, use_softmax=False, pooling_method=None)
model = model.build_model()
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(LSTMDataGenerator(x_train, labels=y_train), epochs=5)
model.save(model_name)
else:
if os.path.exists(already_trained_model):
model = keras.models.load_model(already_trained_model)
model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(LSTMDataGenerator(x_train, labels=y_train), epochs=3)
model.save(model_name)
else:
print_error("Provided model file doesn't exist! Exiting...")
sys.exit(1)
def construct_probabilities_sequences(x_train, y_train, loaded_multi_class_model):
def preprocess_func(im):
return Preprocessor.preprocess(os.path.join(TRAIN_DIR_STAGE_2, im + ".dcm"))
new_x_train = list()
new_y_train = list()
print(len(x_train))
count = 1
ideal_length = max([len(seq) for seq in x_train])
for seq, label_seq in zip(x_train, y_train):
print(count)
label_seq = np.array(label_seq)
padding = np.zeros((ideal_length, 6))
label_padding = np.zeros((ideal_length, 6))
preprocessed_seq = np.array(list(map(preprocess_func, seq)))
preprocessed_seq = np.array([np.repeat(p[..., np.newaxis], 3, -1) for p in preprocessed_seq])
predictions = loaded_multi_class_model.predict(preprocessed_seq)
padding[:predictions.shape[0], :predictions.shape[1]] = predictions
padding = padding.reshape(1, *padding.shape)
label_padding[:label_seq.shape[0], :label_seq.shape[1]] = label_seq
label_padding = label_padding.reshape(1, *label_padding.shape)
new_x_train.append(padding)
new_y_train.append(label_padding)
count += 1
    new_x_train = np.concatenate(new_x_train, axis=0)
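    # Hedged completion (original tail truncated): mirror the concatenation
    # for the labels and return both arrays.
    new_y_train = np.concatenate(new_y_train, axis=0)
    return new_x_train, new_y_train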
# -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixels data of the images as well as it's real size.
It also provides a lot of tools to correct and perform various analysis and tasks on the image.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
The name of the channel. What does the image represents?
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
        if real is not None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
        if 'unit' not in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
def __add__(self, b):
"""
        Add up two images. This is a low-level function and no check is performed to prove that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
New.channels += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
        Subtract two images. This is a low-level function and no check is performed to prove that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
New.channels += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
New.channels = "({})*{:.2f}".format(New.channel,b)
return New
    def __truediv__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
New.channels = "({})/{:.2f}".format(New.channel,b)
return New
def pxs(self):
"""
Return the pixel size
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
"""
Display a scale marker on an existing image
Parameters
----------
length : float
The length of the scale in real units
ax : matplotlib axis
if None the current axis will be taken (plt.gca())
height : int
The height of the scale bar in pixels
color : string
The color used to display the scale bar
loc : int
The location of the scale bar.
1 : top right
2 : top left
3 : bottom left
4 : bottom right
text : bool
display the size of the scale on top of it?
pixels : bool
Is the image plotted in ax with a x/y scale in pixels?
fontsize : float
The fontsize used to display the text
Example
-------
>>> img = pySPM.SPM_image()
>>> img.show()
>>> img.add_scale(50e-6, pixels=False);
Add a scale of 50 μm on an image displayed with real units
>>> img = pySPM.SPM_image()
>>> img.show(pixels=True)
>>> img.add_scale(50e-6);
Add a scale of 50 μm on an image displayed in pixels
"""
import matplotlib.patches
import matplotlib.patheffects as PathEffects
fL = length/self.size['real']['x']
L = self.size['pixels']['x']*fL
fH = height/self.size['pixels']['y']
if ax is None:
ax = plt.gca()
if pixels is None:
if hasattr(ax, 'isPixel'):
pixels = ax.isPixel
else:
pixels = False
flipped = False
if hasattr(ax, 'flipped'):
flipped = ax.flipped
if type(loc) is int:
assert loc in [1, 2, 3, 4]
ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
if loc in [2,3]:
ref[0] += margin
else:
ref[0] -= margin
if loc in [1,2]:
ref[1] += margin
else:
ref[1] -= margin
else:
assert type(loc) in [tuple, list]
assert len(loc)==2
ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
inv = ax.transData.inverted()
ref = inv.transform(ref)
WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
if text:
r = funit(length, self.size['real']['unit'])
if r['unit'][0] == 'u':
r['unit'] = '$\\mu$' + r['unit'][1:]
if loc in [3,4]:
label_ref = [ref[0]+WH[0]/2, ref[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="top", ha="center")
else:
label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="bottom", ha="center")
ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
"""
        Correct an image by offsetting each row individually so that the lines passed as argument in "profiles" become flat.
Parameters
----------
profiles: list of list
each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
width : int, float
the line width in pixels used for better statistics
ax : matplotlib axis or None
If not None, axis in which the profiles will be plotted in
inline : bool
If True perform the correction on the current object, otherwise return a new image
col : string
            matplotlib color used to plot the profiles (if ax is not None)
labels : bool
display a label number with each profile
**kargs: arguments passed further to get_row_profile.
            axPixels: set to True if your axis "ax" has the data plotted in pixels instead of real distance
Example
-------
        Example if the data are plotted in pixels:
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
>>> topo.show(pixels=True, ax=ax[0])
>>> topoC.show(ax=ax[1]);
Example if the data are plotted with real units
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
>>> topo.show(ax=ax[0])
>>> topoC.show(ax=ax[1]);
"""
offset = np.zeros(self.pixels.shape[0])
counts = np.zeros(self.pixels.shape[0])
for i, p in enumerate(profiles):
if kargs.get('labels', False):
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
else:
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
counts[y] += 1
offset[y[1:]] += np.diff(D)
counts[counts == 0] = 1
offset = offset/counts
offset = np.cumsum(offset)
offset = offset.reshape((self.pixels.shape[0], 1))
if inline:
self.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return self
else:
C = copy.deepcopy(self)
C.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return C
def pxRect2Real(self, xy, width, height):
"""
        Transform xy, width, height data in pixels to the equivalent one in real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
"""
Get a profile per row along a given line. This function is mainly useful for the function offset.
x1, y1, x2, y2: int
coordinates of the line.
width : int
the width of the line used for statistics (in pixels)
col: string
color used to plot the line position
ax : matplotlib axis
axis in which the lines position will plotted
alpha : float
The alpha channel of the line color (≥0 and ≤1)
**kargs:
line style arguments: linewidth, color and linestyle
axis units: axPixels set to True if ax has the image plotted in pixels.
Returns
-------
Y coordinates : 1D numpy array
distance along the profile starting at 0
Z coordinates : 1D numpy array
profile
"""
plotargs = { key: kargs[key] for key in ['linewidth', 'color', 'linestyle'] if key in kargs }
if y2 < y1:
x1, y1, x2, y2 = x2, y2, x1, y1
if ax is not None:
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
if kargs.get('axPixels', False):
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], col)
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], col)
ax.plot((x1, x2), (y1, y2), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2),.5*(y1+y2)), color=col)
if alpha>0:
import matplotlib.patches
ax.add_patch(matplotlib.patches.Rectangle((x1+dx,y1+dy),width, d, -np.degrees(np.arctan2(x2-x1,y2-y1)), color=col, alpha=alpha))
else:
h = self.pixels.shape[0]
pxs = self.size['real']['x'] / self.pixels.shape[1]
pys = self.size['real']['y'] / h
ax.plot([(x1-dx)*pxs, (x1+dx)*pxs], [(h-(y1-dy))*pys, (h-(y1+dy))*pys], col)
ax.plot([(x2-dx)*pxs, (x2+dx)*pxs], [(h-(y2-dy))*pys, (h-(y2+dy))*pys], col)
ax.plot((x1*pxs, x2*pxs), ((h-y1)*pys, (h-y2)*pys), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2)*pxs,.5*(2*h-y1-y2)*pys), color=col)
if alpha>0:
import matplotlib.patches
W = np.sqrt((2*dx*pxs)**2+(2*dy*pys)**2)
L = np.sqrt(((x2-x1)*pxs)**2+((y2-y1)*pys)**2)
ax.add_patch(matplotlib.patches.Rectangle(((x1+dx)*pxs,(y1+dy)*pys), W, L, -np.degrees(np.arctan2((x2-x1)*pxs,(y2-y1)*pys)), color=col, alpha=alpha))
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
I = scipy.interpolate.interp2d(x, y, np.flipud(self.pixels))
Y = np.arange(y1, y2+1)
V = np.zeros(len(Y))
for w in np.arange(width):
xl = np.linspace(x1-(width-1)/2.+w, x2-(width-1)/2.+w, len(Y))
for i in range(len(Y)):
Z = I(xl[i], Y[i])
V[i] += Z
return Y, V/width
def correct_median_diff(self, inline=True):
"""
Correct the image with the median difference
"""
N = self.pixels
# Difference of the pixel between two consecutive row
N2 = np.vstack([N[1:, :], N[-1:, :]])-N
# Take the median of the difference and cumsum them
C = np.cumsum(np.median(N2, axis=1))
# Extend the vector to a matrix (row copy)
D = np.tile(C, (N.shape[0], 1)).T
if inline:
self.pixels = N-D
else:
New = copy.deepcopy(self)
New.pixels = N-D
return New
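    # Usage sketch (hypothetical data, not part of the library API): remove the
    # line jumps typical of scan data, either in place or on a copy.
    #   img = SPM_image(np.random.rand(256, 256), channel='Topography')
    #   img.correct_median_diff()                       # in-place correction
    #   fixed = img.correct_median_diff(inline=False)   # new corrected image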
def correct_slope(self, inline=True):
"""
Correct the image by subtracting a fitted slope along the y-axis
"""
s = np.mean(self.pixels, axis=1)
i = np.arange(len(s))
fit = np.polyfit(i, s, 1)
if inline:
self.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return New
def correct_plane(self, inline=True, mask=None):
"""
Correct the image by subtracting a fitted 2D-plane on the data
Parameters
----------
inline : bool
If True the data of the current image will be updated otherwise a new image is created
mask : None or 2D numpy array
If not None define on which pixels the data should be taken.
"""
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
X0, Y0 = np.meshgrid(x, y)
Z0 = self.pixels
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
A = np.column_stack((np.ones(Z.ravel().size), X.ravel(), Y.ravel()))
c, resid, rank, sigma = np.linalg.lstsq(A, Z.ravel(), rcond=-1)
if inline:
self.pixels -= c[0] * \
np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return self
else:
New = copy.deepcopy(self)
New.pixels -= c[0]*np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return New
def correct_lines(self, inline=True):
"""
Subtract the average of each line for the image.
if inline is True the current data are updated otherwise a new image with the corrected data is returned
"""
if inline:
self.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return New
def dist_v2(self, pixel=False):
"""
Return a 2D array with the distance between each pixel and the closest border.
        Might be useful for FFT filtering
"""
if pixel:
dx = 1
dy = 1
else:
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
x2 = np.arange(self.size['pixels']['x'])
x2 = (np.minimum(x2, self.size['pixels']['x']-x2) * dx)**2
y2 = np.arange(self.size['pixels']['y'])
y2 = (np.minimum(y2, self.size['pixels']['y'] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
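    # Sketch of the FFT use case hinted at above (assumed workflow, not a
    # method of this class): dist_v2 gives a radial distance map from which a
    # Gaussian low-pass transfer function can be built.
    #   r = img.dist_v2(pixel=True)
    #   H = np.exp(-(r / 10.0)**2)      # 10 px cutoff, illustrative value
    #   low = np.real(np.fft.ifft2(np.fft.fft2(img.pixels) * H))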
def inv_calc_flat(self, d, l=0.1):
"""
        Function used for inverse MFM calculation (inspired by http://qmfm.empa.ch/qmfm/)
        The function is at an early development stage and is not yet used by the developers.
Parameters
----------
d : float
Height distance in the input data
l : float
Tikhonov parameter for the deconvolution
"""
work_image = self.pixels
ny, nx = self.pixels.shape
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
k = self.dist_v2()
k[0, 0] = 1e-10
tf = np.exp(-d*k)
tf[0, 0] = np.mean(tf)
tf /= 2
tf *= 1-np.exp(-d * k)
recon_tf = np.ones(tf.shape) / (tf+l*np.ones(tf.shape) / np.conj(tf))
tf *= recon_tf
return np.real(np.fft.ifft2(np.fft.fft2(work_image)*recon_tf))
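    # For reference, the textbook Tikhonov-regularized inverse filter reads
    # recon_tf = np.conj(tf) / (np.abs(tf)**2 + l); the expression above is
    # kept as found in the original implementation.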
def get_extent(self):
"""
Get the image extent in real data
"""
if 'recorded' in self.size:
W = self.size['recorded']['real']['x']
H = self.size['recorded']['real']['y']
else:
W = self.size['real']['x']
H = self.size['real']['y']
return (0, W, 0, H)
def show(self, ax=None, sig=None, cmap=None, title=None,
adaptive=False, dmin=0, dmax=0, pixels=False, flip=False, wrap=None, mul=1, symmetric=False, **kargs):
"""
Function to display the image with a lot of parametrization
Parameters
----------
ax : matplotlib axis or None
matplotlib axis if given otherwise current axis will be used (plt.gca())
sig : float
sigma values to adjust the contrast range around the mean ±sig times the standard-deviation
cmap : string
colormap name used. By default a gray map is used. If the zscale of the data are in 'meter' (i.e. topography data) the 'hot' colormap is used
title : string
The title of the plot. By default is the channel name
adaptive : bool
            The color scale used is linear. If adaptive is True a non-linear color scale is used so that each color covers an equal share of the data.
dmin : float
minimum value adjustment used for the colorscale
dmax: float
maximum value adjustment used for the colorscale
pixels : bool
Display the image with x/y-labels with real unit. If pixels is True, the axes are in pixels
flip : bool
Flip the image upside-down
wrap : Nont or int
wrap the title to a width of wrap chars
symmetric : bool
            If True will place the middle of the colorscale to the value 0.
            This is especially useful for diverging colormaps such as : BrBG, bwr, coolwarm, seismic, spectral, etc.
level : float
level should be ≥0 and <50. Adjust the lower and upper colorscale to level% and (100-level)% of the data range.
e.g. if level=1, the colorscale will display 1-99% of the data range
vmin : float
Minimum value used for the colorscale
        vmax : float
Maximum value used for the colorscale
Returns
-------
matplotlib.image.AxesImage
            matplotlib AxesImage instance returned by imshow
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, (ax, ax2) = plt.subplots(2, 3, figsize=(15, 10))
>>> topo.show(ax=ax[0], cmap='gray', title="color map=\"gray\"")
>>> topo.show(ax=ax[1], sig=2, title="standard deviation=2")
>>> topo.show(ax=ax[2], adaptive=True, title="Adaptive colormap")
>>> topo.show(ax=ax2[0], dmin=4e-8, cmap='gray', title="raise the lowest value for the colormap of +40nm")
>>> topo.show(ax=ax2[1], dmin=3e-8, dmax=-3e-8, cmap='gray',title="raise lower of +30nm and highest of -30nm")
>>> topo.show(ax=ax2[2], pixels=True, title="Set axis value in pixels");
"""
mpl.rc('axes', grid=False)
if ax is None:
ax = plt.gca()
ax.src = self
if title == None:
title = u"{0} - {1}".format(self.type, self.channel)
if wrap is not None:
title = "\n".join([title[i*wrap:(i+1)*wrap]
for i in range(int(len(title)/wrap)+1)])
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1 or unit in ['pixels']:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
W = self.size['real']['x']
H = self.size['real']['y']
fact = int(np.floor(np.log(W)/np.log(10)/3))
isunit += fact
W, H = W/10**(fact*3), H/10**(fact*3)
if cmap == None:
cmap = 'gray'
if unit == 'm' and self.channel == "Topography":
cmap = 'hot'
mi, ma = np.nanmin(self.pixels), np.nanmax(self.pixels)
if adaptive:
            img = np.asarray((2**16-1)*(self.pixels-mi)/(ma-mi), dtype=np.uint16)
mi, ma = 0, 1
img = skimage.exposure.equalize_adapthist(img, clip_limit=0.03)
else:
img = mul*self.pixels
mi *= mul
ma *= mul
if sig == None:
vmin = mi+dmin
vmax = ma+dmax
else:
std = np.nanstd(img)
avg = np.nanmean(img)
vmin = avg - sig * std
vmax = avg + sig * std
if 'level' in kargs:
if kargs['level'] < 0 or kargs['level']>=50:
raise ValueError("The level shoud have a value in [0,50)")
vmax = np.percentile(img, 100-kargs['level'])
vmin = np.percentile(img, kargs['level'])
del kargs['level']
if 'vmin' in kargs:
vmin = kargs['vmin']
del kargs['vmin']
if 'vmax' in kargs:
vmax = kargs['vmax']
del kargs['vmax']
if symmetric:
vmax = abs(max(vmin,vmax))
vmin = -vmax
if not flip:
ax.flipped = False
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), extent=[0, W, 0, H], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.flipped = True
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), cmap=cmap, extent=[0, W, 0, H], vmin=vmin, vmax=vmax, **kargs)
if pixels:
ax.set_xlim((0, self.pixels.shape[1]))
if flip:
ax.set_ylim((0, self.pixels.shape[0]))
else:
ax.set_ylim((self.pixels.shape[0], 0))
else:
ax.set_xlim((0,W))
if flip:
ax.set_ylim((H,0))
else:
ax.set_ylim((0,H))
if not pixels:
if isunit != 6:
u = sunit[isunit]
if u == 'u':
u = '$\\mu$'
ax.set_xlabel(u'x [{0}{1}]'.format(u, unit))
ax.set_ylabel(u'y [{0}{1}]'.format(u, unit))
else:
ax.set_xlabel(u'x [{0}]'.format(unit))
ax.set_ylabel(u'y [{0}]'.format(unit))
if title != None:
ax.set_title(title)
return r
def real2px(self, x, y):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
return self.real2pixels(x,y)
def real2pixels(self, x, y, float=False):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not float:
px = np.digitize(x, np.linspace(0,self.size['real']['x']/(10**fact),self.pixels.shape[1]), right=True)
py = np.digitize(y, np.linspace(0,self.size['real']['y']/(10**fact),self.pixels.shape[0]), right=False)
else:
px = x*(self.pixels.shape[1]-1)/(self.size['real']['x']/(10**fact))
py = y*(self.pixels.shape[0]-1)/(self.size['real']['y']/(10**fact))
return px, py
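    # Quick sanity sketch (illustrative values): on a 512 px wide, 10 um image
    # the real coordinate 5 um maps close to pixel 256.
    #   px, py = img.real2pixels(5.0, 5.0)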
def px2real(self, x, y):
"""
Transform a (x,y) value from pixels to real
Units are the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
        fact = int(np.floor(np.log(W)/np.log(10)/3))*3
        # NOTE: the tail of this method was truncated in the source; the
        # following inverse of real2pixels is a reconstruction (assumption):
        return (x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],
                y*(self.size['real']['y']/(10**fact))/self.pixels.shape[0])
#!/usr/bin/python
#
# Copyright 2018, <NAME>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from time import time
import os,sys,re,subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import random
import logging
import argparse
import pickle
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist,squareform
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold,GridSearchCV
from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError:
print("INFO: Please install seaborn package for plotting.")
__author__ = 'chris'
def extract_features(mol, sourcename, pos, printHeader=True, fillNa=np.nan, xyz_file=None, plot=False, useSelectionRules=True, OrderAtoms=True, bondAngles=True, skipH=False, addBonds=False, verbose=False):
"""
Create feature matrix from RDKit mol object or xyz file
:param mol: RDKit molecule
:param sourcename: name of sd file
:param pos: position in sdf
:param xyz_file: name of xyz
:param plot: plotting
:param useSelectionRules: use rules to remove strange bonds
:param OrderAtoms: larger atomic number first
:param bondAngles: add bond angles, i.e. distance to third atom
:param skipH: remove H
:param addBonds: add neighbor bonds as features
:param printHeader: prints column headers
:param fillNa: how to fill NA values
:param verbose: verbosity on/off
:return: pandas dataframe with feature matrix
"""
pt = Chem.GetPeriodicTable()
if xyz_file is not None:
if xyz_file.lower().endswith(".xyz"):
atomtypes, coords, q, title = read_xyz(xyz_file, skipH=skipH)
elif xyz_file.lower().endswith(".pdb"):
atomtypes, coords, q, title = read_pdbfile(xyz_file, skipH=skipH)
if q!=0:
logging.info("Found charge: %.2f"%(q))
dm = squareform(pdist(np.asarray(coords)))
else:
if skipH:
try:
mol = Chem.RemoveHs(mol)
except ValueError as e:
logging.info("Skipping H deletion for molecule at pos:" + str(pos))
return(None)
#check if bonds are available
try:
if not addBonds and mol.GetNumBonds(onlyHeavy=False)==0:
logging.info("No bonds found: skipping molecule %s " %Chem.MolToSmiles(mol))
return (None)
except RuntimeError as e:
logging.info("RuntimeError: skipping molecule")
return(None)
dm = Chem.Get3DDistanceMatrix(mol) # both should be the same!!!
q = Chem.GetFormalCharge(mol)
n,m = dm.shape
assert(n == m)
if plot:
plt.pcolormesh(dm)
plt.colorbar()
plt.xlim([0, n])
plt.ylim([0, n])
plt.show()
dist_cut = 3.0 # distance cutoff
n_cut = 3 # neighbour cutoff
if printHeader and verbose:
print('{:<4s}{:<4s}{:>4s}{:>3s}{:>3s}{:>8s}'.format('ID1','ID2','Q', '#1', '#2', 'DIST'),end='')
for i in range(2*n_cut):
if addBonds:
print('{:>4s}{:>3s}{:>8s}{:>8s}{:>4s}'.format('POS', '#', 'DIST', 'DISTB','BNB'),end='')
elif bondAngles:
print('{:>4s}{:>3s}{:>8s}{:>8s}'.format('POS', '#', 'DIST','DISTB'),end='')
else:
print('{:4s}{:3s}{:8s}'.format('POS', '#', 'DIST'),end='')
print("{:4s}".format('TYPE'))
df = []
index = []
for i in range(0,n):
if xyz_file is not None:
bnd_at1 = atomtypes[i]
bond_num1 = pt.GetAtomicNumber(bnd_at1)
else:
bnd_at1 = mol.GetAtomWithIdx(i)
bond_num1 = bnd_at1.GetAtomicNum()
bnd_at1 = bnd_at1.GetSymbol()
for j in range(0,m):
row = []
if i >= j: continue
bnd_dist = dm[i,j]
if bnd_dist>dist_cut: continue
bnd_type = 0
if xyz_file is None:
bnd_at2 = mol.GetAtomWithIdx(j)
bond_num2 = bnd_at2.GetAtomicNum()
bnd = mol.GetBondBetweenAtoms(i, j)
if bnd is not None:
bnd_type = int(bnd.GetBondTypeAsDouble())
if bnd.GetIsAromatic():
bnd_type = 4
else:
bnd_type = 0
bnd_at2=bnd_at2.GetSymbol()
else:
bnd_at2 = atomtypes[j]
bond_num2 = pt.GetAtomicNumber(bnd_at2)
#sanity checks
if xyz_file is None:
# we accept very short bonds but give warning
selstr = "Skipping"
if not useSelectionRules:
selstr = "Keeping"
if bnd_dist<0.75 and bnd_type>0:
logging.warn("Unreasonable short X-X bond (r<0.75): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
elif bnd_dist<1.1 and bond_num1>=6 and bond_num2>=6 and bnd_type>0:
logging.warn("Unreasonable short X-X bond (r<1.1): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
# in case of problems we discard whole molecule
elif bnd_dist < 0.75 and (bond_num1 == 1 or bond_num2 == 1) and bnd_type == 0:
logging.warn("%s unreasonable short X-H distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return (None)
elif bnd_dist < 1.5 and bond_num1==6 and bond_num2==6 and bnd_type==0:
logging.warn("%s unreasonable short C-C distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return(None)
elif bnd_dist < 1.0 and bond_num1>=6 and bond_num2>=6 and bnd_type==0:
logging.warn("%s unreasonable short distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return(None)
# rather generous cutoff
elif bnd_dist>1.8 and bond_num1==6 and bond_num2==6 and bnd_type>0:
logging.warn("%s unreasonable long C-C bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(selstr,bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
if useSelectionRules: return(None)
#unique order
if OrderAtoms and bond_num1<bond_num2:
row.extend([j + 1, i + 1, q,bond_num2, bond_num1, bnd_dist])
i_tmp,j_tmp = j,i
else:
row.extend([i + 1, j + 1, q,bond_num1, bond_num2, bnd_dist])
i_tmp, j_tmp = i, j
if verbose: print('{:<4d}{:<4d}{:4.1f}{:3d}{:3d}{:8.3f}'.format(i_tmp+1,j_tmp+1,q,bond_num1,bond_num2,bnd_dist),end='')
# now iterate over neighbors of a and b and i.e. sort row a and b and concat, then skip i and j
for a in [i_tmp,j_tmp]:
row_sorted_a = np.argsort(dm[a,:])
count = 0
k = 0
if len(row_sorted_a) > 2:
for nextn in row_sorted_a:
nextn = int(nextn)
if nextn == j_tmp or nextn == i_tmp:
continue
if k==n_cut:break
dist = dm[a,nextn]
if xyz_file is None:
at = mol.GetAtomWithIdx(nextn)
num = at.GetAtomicNum()
at = at.GetSymbol()
else:
at = atomtypes[nextn]
num = pt.GetAtomicNumber(at)
if bondAngles:
other = i_tmp if a==j_tmp else j_tmp
distb = dm[other,nextn]
if addBonds:
bndb = mol.GetBondBetweenAtoms(a, nextn)
if bndb is not None:
bnd_typeb = int(bndb.GetBondTypeAsDouble())
if bndb.GetIsAromatic():
#bnd_type=randint(1,2)
bnd_typeb = 4
else:
bnd_typeb = 0
row.extend([num, dist, distb,bnd_typeb])
if verbose:
print('{:4d}{:>3d}{:8.3f}{:8.3f}{:4d}'.format(nextn+1,num,dist,distb,bnd_typeb),end='')
else:
row.extend([num, dist,distb])
if verbose:
print('{:4d}{:>3s}{:3d}{:8.3f}{:8.3f}'.format(nextn+1,at,num,dist,distb),end='')
else:
row.extend([num, dist])
if verbose:
print('{:4d}{:>3s}{:3d}{:8.3f}'.format(nextn+1,at,num,dist),end='')
k += 1
count += 1
# padding
while count<n_cut:
count += 1
if verbose:
print('{:>4d}{:>3s}{:3d}{:8.3f}'.format(0,"NA", 0, fillNa),end='')
row.extend([0, fillNa])
if bondAngles:
row.extend([fillNa])
if verbose: print('{:4d}'.format( bnd_type),end='')
row.append(bnd_type)
df.append(row)
index.append(sourcename + '_pos' + str(pos+1) + '_' + str(i_tmp + 1) + 'x' + str(j_tmp + 1))
try:
df = pd.DataFrame(df)
colnames = ['id1','id2','q','ata','atb','distab','ata1','dista1','ata2','dista2','ata3','dista3','atb1','distb1','atb2','distb2','atb3','distb3','bond']
if addBonds:
colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1', 'dista1b','bonda1', 'ata2', 'dista2',
'dista2b','bonda2', 'ata3', 'dista3', 'dista3b','bonda3',
'atb1', 'distb1', 'distb1a','bondb1', 'atb2', 'distb2', 'distb2a','bondb2', 'atb3', 'distb3', 'distb3a','bondb3', 'bond']
elif bondAngles:
colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1','dista1b', 'ata2', 'dista2','dista2b', 'ata3', 'dista3','dista3b',
'atb1', 'distb1','distb1a', 'atb2', 'distb2','distb2a', 'atb3', 'distb3','distb3a','bond']
if len(colnames)!=len(df.columns):
logging.error("Mismatch in dataframe colums for %s - SMILES: %s"%(sourcename+'_pos'+str(pos+1), Chem.MolToSmiles(mol)))
df.columns = colnames
df.index = index
except ValueError:
#i.e. for empty dataframes
df = None
return df
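# Minimal usage sketch for a single molecule (file name is illustrative):
#   suppl = Chem.SDMolSupplier('molecules.sdf', removeHs=False, sanitize=True)
#   df = extract_features(suppl[0], 'molecules.sdf', 0)
#   print(df[['ata', 'atb', 'distab', 'bond']].head())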
def convert_sdf2dataframe(infile, outfile="moldat.csv", fillNa=np.nan, sanitize=True, tempsave=False, useSelectionRules=True, skipH=False, addBonds=True, sample=None, debug=False, verbose=False):
"""
Generate training dataset from list of sd files
sd file -> Pandas DataFrame
:param infile: sd file used for training
:param outfile: feature matrix as .csv file
:param fillNa: fill value for NA positions
:param sanitize: switch this off for special molecules RDKit cannot digest, should be True in order to have aromatic bonds
:param tempsave: save temporary data
:param useSelectionRules: apply rules to filter nonsense structures
:param skipH: remove hydrogens
:param addBonds: inject neighbor bonds to feature matrix
:param sample: subsample dataset fraction [0-1]
:param verbose: verbosity on/off
:return: feature matrix as pandas dataframe
"""
logging.info("Generating feature using RDKit matrix from: %s -- with options skipH (%r) iterative(%r) filterRubbish(%r) "%(infile,skipH,addBonds,useSelectionRules))
if sample is not None:
logging.info("Subsampling fraction %4.2f of dataset"%(sample))
np.random.seed(42)
df_new = None
suppl = Chem.SDMolSupplier(infile,removeHs=skipH,sanitize=False)
count=0
for i,mol in enumerate(suppl):
if sanitize:
try:
Chem.SanitizeMol(mol) #adding aromatic bonds...we may have a problem here
except ValueError as e:
logging.info("Skipping sanitization for molecule at pos:" + str(i+1))
if debug:
w = Chem.SDWriter('tmp_pos'+str(i+1)+'.sdf')
w.write(mol)
w.close()
# we cannot use it then...
if mol is not None:
if sample is not None and np.random.random_sample()>sample:
continue
if i>0:
df_new = pd.concat([df_new, extract_features(mol, infile, i, verbose=verbose, printHeader=True, fillNa=fillNa, useSelectionRules=useSelectionRules, skipH=skipH, addBonds=addBonds)], axis=0)
else:
df_new = extract_features(mol, infile, i, verbose=verbose, printHeader=True, fillNa=fillNa, useSelectionRules=useSelectionRules, skipH=skipH, addBonds=addBonds)
count += 1
else:
logging.info("SKIPPING molecule at pos:"+str(i+1))
logging.error("SKIPPING molecule at pos:" + str(i+1))
logging.info("Processed total of >%d< molecules" % (count))
if df_new is not None and tempsave:
logging.info("%3d Generated temp file: %s" % (i + 1, outfile))
df_new.to_csv(outfile,index=True)
    if df_new is None:
        logging.error("There was a problem generating the data!")
        return None
logging.info("Bond types: \n%r"%(df_new['bond'].value_counts()))
logging.info("Total bonds: %r\n" % (df_new['bond'].value_counts().sum()))
return(df_new)
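# Sketch (illustrative arguments): build a training table from an sd file,
# keeping H atoms and injecting neighbor bonds as features.
#   df = convert_sdf2dataframe('train.sdf', skipH=False, addBonds=True, sample=0.5)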
def convert_sdfiles2csv(file_list = [], base_dir='', outdat='train_dat.csv', method='UFF', skipH=False, addBonds=False, sample=0.25, verbose=False):
"""
Allows for training use a list of filenames, for internal testing
:param file_list: list of .sd files
:param base_dir: location of those files
:param outdat: .csv file with feature matrix and target vectors
"""
finalf = outdat
for i,f in enumerate(file_list):
infile = base_dir+f
if not os.path.isfile(infile):
logging.critical("File not found:"+infile)
logging.critical("CWD:"+os.getcwd())
sys.exit(1)
outfile = 'moldat_tmp.csv'
if infile.endswith('.smi'):
infile = convert_smiles2sdfile(smifile=infile, outdat=outfile, method=method, verbose=verbose)
infile = infile.replace(".smi",".sdf")
print(infile)
df = convert_sdf2dataframe(infile=infile, outfile=outfile, fillNa=9999.0, skipH=skipH, addBonds=addBonds, sample=sample, verbose=verbose)
if df is None: continue
outstr = 'writing'
mode = 'w'
header = True
if os.path.isfile(finalf):
mode = 'a'
header = False
outstr = 'appending'
with open(finalf, mode) as f:
df.to_csv(f, header=header, index=True)
print(df.head())
logging.info("File: %3d - %s .csv file to: %s" % (i + 1, outstr, finalf))
def train_from_csv(filename, grid_search=False, useRF=False, plotClassifier=False, save_clf='clf.p',verbose=False):
"""
Train bond data with sklearn classifier, final model gets pickled.
:param filename: .csv file with feature matrix
:param grid_search: Do a parameter search on grid
:return: trained scikit-learn model
"""
logging.info("Training data on dataset:")
df = pd.read_csv(filename,index_col=0)
if 'id1' in df.columns and 'id2' in df.columns:
df.drop(['id1', 'id2'], axis=1,inplace=True)
logging.info("Shape : %d X %d"%(df.shape[0],df.shape[1]))
logging.info("Features: %s" % (df.columns))
# remove similar data
logging.info("Droping duplicates...")
df.drop_duplicates(inplace=True)
logging.info("Shape : %d X %d" % (df.shape[0], df.shape[1]))
y = df['bond']
X = df.drop(['bond'],axis=1,inplace=False)
if plotClassifier:
tree = DecisionTreeClassifier( max_depth=5)
tree.fit(X,y)
dot_data = tree.export_graphviz(tree, out_file='tree')
import graphviz
graph = graphviz.Source(dot_data)
graph.render("decisiontree")
n_jobs = 1
n_splits = 4
if useRF:
model = RandomForestClassifier(n_estimators=250, max_depth=None, min_samples_leaf=5, n_jobs=n_jobs,
max_features=11, oob_score=False)
else:
#model = xgb.XGBClassifier(n_estimators=2000, learning_rate=0.01, max_depth=5, NA=0, subsample=.5,colsample_bytree=1.0, min_child_weight=5, n_jobs=4, objective='multi:softprob',num_class=5, booster='gbtree', silent=1, eval_size=0.0)
#parameters = {'n_estimators': [2000], 'learning_rate': [0.01, 0.1, 0.001], 'max_depth': [5, 7],'subsample': [0.5]}
model = GradientBoostingClassifier(n_estimators=1000,learning_rate=0.1,max_depth=5,verbose=1)
parameters = {}
if grid_search:
#model.set_params(n_jobs=1)
n_jobs = 4
cv = StratifiedKFold(n_splits=n_splits)
model = GridSearchCV(model, parameters, n_jobs=n_jobs, verbose=2, scoring='f1_micro', cv=cv,refit=True)
model.fit(X,y)
means = model.cv_results_['mean_test_score']
stds = model.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, model.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
print(model)
else:
logging.info("Fitting classifier: %s"%(model))
model.fit(X, y)
pickle.dump(model,open( save_clf, "wb" ))
logging.info("Saving classifier as: %s"%(save_clf))
return(model)
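# Sketch: fit the default gradient-boosting model on a previously generated
# feature table and pickle it as clf.p (file name is illustrative).
#   model = train_from_csv('train_dat.csv', grid_search=False, useRF=False)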
def train_job(filename, reset=True, eval=False, fmethod='UFF', skipH=False, iterative=False, sample=False, useRF=False,verbose=False):
"""
Use either .sdf or .smi file to
train from a new dataset or append data
:param filename: name of .smi of .sd file
:param reset: removes old training data
"""
if eval:
train_file = 'eval_dat.csv'
reset=True
else:
train_file = 'train_dat.csv'
iter_file = ""
if iterative and not eval:
logging.info("Iterative mode switched ON!")
iter_file = train_file.replace("_dat","_iter")
if useRF and not eval:
logging.info("INFO: Using Random Forest for training!")
if reset:
if os.path.isfile(train_file):
os.remove(train_file)
if os.path.isfile(iter_file):
os.remove(iter_file)
if filename.endswith('.sdf') or filename.endswith('.sd'):
convert_sdfiles2csv(file_list=[filename], outdat=train_file, skipH=skipH, addBonds=False, sample=sample, verbose=verbose)
if iterative and not eval:
convert_sdfiles2csv(file_list=[filename], outdat=iter_file, skipH=skipH, addBonds=True, sample=sample, verbose=verbose)
elif filename.endswith('.smi'):
logging.info("Using forcefield for optimization: %s" % (fmethod))
convert_sdfiles2csv(file_list=[filename], outdat=train_file, method=fmethod, skipH=skipH, addBonds=False)
if iterative and not eval:
convert_sdfiles2csv(file_list=[filename], outdat=iter_file, method=fmethod, skipH=skipH, addBonds=True, verbose=verbose)
if not os.path.isfile(train_file):
sys.stderr.write("ERROR: Missing training data file: %s!\n"%(train_file))
sys.exit(1)
if eval:
evaluate(train_file,iterative=iterative, verbose=verbose)
else:
train_from_csv(train_file, useRF=useRF, verbose=verbose)
if iterative:
train_from_csv(iter_file,useRF=useRF, save_clf="clf_iter.p", verbose=verbose)
def eval_job(filename, skipH=False, iterative=False,verbose=False):
"""
Evaluation per! molecule
:param filename: filename for evaluation
:param skipH: omit hydrogen
:param iterative: use 2nd classifier
:param verbose: verbose mode
:return: -
"""
# iterate over mols of SDF
# mol -> df -> bonds_predicted / bonds_true
# make SDF -> extract features -> df -> bonds_predicted2
# compare bonds_true & bonds_predicted2
# generatePredictions with mol
print("Evaluation run with option: noH(%r)" % (skipH))
print("Loading classifier...")
clf = pickle.load(open('clf.p', "rb"))
if iterative:
clf_iter = pickle.load(open('clf_iter.p', "rb"))
else:
clf_iter = None
suppl = Chem.SDMolSupplier(filename, removeHs=skipH, sanitize=iterative)
nok = 0
nfalse = 0
for i, mol in enumerate(suppl):
if mol is None: continue
res = generate_predictions(mol, skipH=skipH, iterative=True, forceAromatics=False, maxiter=1, verbose=verbose,
clf=clf, clf_iter=clf_iter, isEval=True)
if res is None: continue
if i % 50 == 0:
logging.info("%d %r\n" % (i, res))
if res:
nok += 1
else:
nfalse += 1
nall = len(suppl)
acc = nok / float(nall)
logging.info("\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f" % (nall, nok, nfalse, acc))
def evaluate(filename_test,filename_train='train_dat.csv',plotting=True,iterative=False,verbose=False):
"""
Evaluate on dataset with known bond info, molecule accuracy is computed afterwards
:param filename_test: name of .csv file with feature matrix and targets
"""
df = pd.read_csv(filename_test,index_col=0)
filename_train=None
# shown train_data
if filename_train is not None:
logging.info("Analyze train data...")
df_train = pd.read_csv(filename_train,index_col=0)
print(df_train.shape)
df_train['bondtype']=df_train['bond'].astype('category')
df_train = df_train[df_train.ata==6]
df_train = df_train[df_train.atb==6]
if plotting:
ax = sns.boxplot(x="bond", y="distab", data=df_train[['distab','bond']])
ax.set(ylabel='C-C distance', xlabel='bond type')
#ax.set(xticklabels=[])
plt.show()
logging.info("Evaluate data set: " + filename_test)
logging.info("Loading classifier...")
clf = pickle.load(open("clf.p", "rb"))
logging.info("Loading test set with %d rows from file %s\n"%(df.shape[0],filename_test))
y = df['bond']
X = df.drop(['bond','id1','id2'],axis=1,inplace=False)
yprob = clf.predict_proba(X)
ypred = clf.predict(X)
score = accuracy_score(y,ypred)
score2 = f1_score(y,ypred,average='weighted')
logging.info("ACCURACY:%0.3f - F1-score: %0.3f\n" % (score,score2))
X['bond_pred'] = ypred
X['p(-)'] = yprob[:, 1]
X['p(=)'] = yprob[:, 2]
X['p(#)'] = yprob[:, 3]
X['p(a)'] = yprob[:, 4]
X['bond'] = y
if plotting:
print("Misclassification stats:")
idx = (ypred != y)
df_tmp = X[idx.values]
print(df_tmp[['ata','atb','distab','bond','bond_pred']].head(200).sort_values(['ata']))
plot_classification_results(y,ypred)
mol_df_list = mol_dataframe_generator(X)
all=0
ok=0
not_ok=0
false_indices=[]
for name, df_sub in mol_df_list:
all += 1
if iterative:
print("ERROR: Iterative - does not work in fast evaluation mode..")
sys.exit(1)
# ok no coordinates/no dm how to get feature matrix...????
if np.array_equal(df_sub['bond_pred'].values, df_sub['bond'].values):
ok += 1
else:
# print("FALSE: %s"%(name))
not_ok += 1
mask = df_sub['bond_pred'] != df_sub['bond']
idx = np.argmax(mask)
false_indices.append(idx)
acc = ok/float(all)
print(false_indices)
print("\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f"%(all,ok,not_ok,acc))
return(X)
def evaluate_OB(filename='fullerene_ml.sdf', verbose=False):
"""
Evaluation via Open Babel
:param filename: sd file
    Note: H atoms are kept on read and removed before comparison (obabel reorders X-H bonds)
:param verbose: True for verbose
:return: -
"""
logging.info("Evaluating %s via OBabel"%(filename))
#if sanitize:
# print("WARNING: Switched ON sanitization!")
#else:
# print("WARNING: Switched OFF sanitization!")
suppl = Chem.SDMolSupplier(filename, removeHs=False, sanitize=True)
nok = 0
nfalse = 0
nall = len(suppl)
for i, mol in enumerate(suppl):
if mol is None: continue
xyz_str = mol2xyz(mol)
#remove H for comparison with OB
mol = Chem.RemoveHs(mol)
df_orig = extract_features(mol, "babel_orig", (i+1), skipH=True)
if df_orig is None: continue
bond_orig = df_orig['bond']
#generate xyz for OB prediction without H
myfile = StringIO.StringIO(xyz_str)
#if removeHs:
#cmd_call = ["obabel", "-d","-ixyz", "-osdf"]
#else:
cmd_call = ["obabel", "-ixyz", "-osdf"]
p = subprocess.Popen(cmd_call, stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
molblock, err = p.communicate(myfile.read())
#switch off sanitization
#mol_pred_H = Chem.MolFromMolBlock(molblock,removeHs=False,sanitize=False)
#always switch off H for comparison of main element bonds only
mol_pred = Chem.MolFromMolBlock(molblock,removeHs=True,sanitize=False)
if mol_pred is None:
nfalse += 1
continue
df = extract_features(mol_pred, "obabel", 0, skipH=True)
if df is None:
nfalse += 1
continue
if len(bond_orig)!=len(df['bond'].values):
logging.error("Original (%d) and predicted bond vector (%d) have different length!"%(len(bond_orig),len(df['bond'].values)))
if verbose:
mol_pred_noH = Chem.RemoveHs(mol_pred)
Chem.Compute2DCoords(mol_pred_noH)
Chem.Compute2DCoords(mol)
img = Draw.MolsToGridImage([mol_pred_noH, mol], molsPerRow=2, subImgSize=(400, 400),
legends=['ob' + str(i + 1), 'orig' + str(i + 1)])
img.show()
        if np.array_equal(bond_orig.values, df['bond'].values):
            # NOTE: the tail of this function was truncated in the source;
            # the hit/miss accounting below mirrors eval_job (assumption).
            nok += 1
        else:
            nfalse += 1
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input, Model
#==============================================================================
#==============================================================================
KPN = pd.read_csv('/Users/henriwoodcock/Documents/University/Year_3/MATH3001'
'/Code/Data/Code_Data/new_data_copy.csv', usecols=[1,2,3,4,5
,6,7,8,9,10
,11,12,13,14,15
,16,17,18,19,20,
21,22,23]
).astype('float32')
'''
train, test = Split_Train_Test(KPN, 0.1)
'''
'''training data:'''
'''
X_train = train.iloc[:,0:6] #input
y_train = train["output"] #output
'''
'''testing data:'''
'''
X_test = test.iloc[:,0:6]
y_test = test["output"]
'''
'''standardising the input data:'''
'''
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
'''
#==============================================================================
#==============================================================================
'''Creating k-folds'''
sc = StandardScaler()
k = 5
set_size = len(KPN)//(k+1)
features = 15
#fold 1:
fold1_train = sc.fit_transform(KPN.iloc[:set_size,0:features])
fold1_train_y = KPN["output"][:set_size]
fold1_test = sc.transform(KPN.iloc[set_size:2*set_size,0:features])
fold1_test_y = KPN["output"][set_size:2*set_size]
fold1_test_y = fold1_test_y.reset_index(drop=True)
#fold 2:
fold2_train = sc.fit_transform(KPN.iloc[:2*set_size,0:features])
fold2_train_y = KPN["output"][:2*set_size]
fold2_test = sc.transform(KPN.iloc[2*set_size:3*set_size,0:features])
fold2_test_y = KPN["output"][2*set_size:3*set_size]
fold2_test_y = fold2_test_y.reset_index(drop=True)
#fold 3:
fold3_train = sc.fit_transform(KPN.iloc[:3*set_size,0:features])
fold3_train_y = KPN["output"][:3*set_size]
fold3_test = sc.transform(KPN.iloc[3*set_size:4*set_size,0:features])
fold3_test_y = KPN["output"][3*set_size:4*set_size]
fold3_test_y = fold3_test_y.reset_index(drop=True)
#fold 4:
fold4_train = sc.fit_transform(KPN.iloc[:4*set_size,0:features])
fold4_train_y = KPN["output"][:4*set_size]
fold4_test = sc.transform(KPN.iloc[4*set_size:5*set_size,0:features])
fold4_test_y = KPN["output"][4*set_size:5*set_size]
fold4_test_y = fold4_test_y.reset_index(drop=True)
#fold 5:
fold5_train = sc.fit_transform(KPN.iloc[:5*set_size,0:features])
fold5_train_y = KPN["output"][:5*set_size]
fold5_test = sc.transform(KPN.iloc[5*set_size:6*set_size,0:features])
fold5_test_y = KPN["output"][5*set_size:6*set_size]
fold5_test_y = fold5_test_y.reset_index(drop=True)
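# The five blocks above follow one pattern and could equivalently be generated
# in a loop, e.g. (sketch, same behavior):
#   folds = []
#   for k_i in range(1, 6):
#       tr = sc.fit_transform(KPN.iloc[:k_i*set_size, 0:features])
#       te = sc.transform(KPN.iloc[k_i*set_size:(k_i+1)*set_size, 0:features])
#       folds.append((tr, KPN["output"][:k_i*set_size], te,
#                     KPN["output"][k_i*set_size:(k_i+1)*set_size].reset_index(drop=True)))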
#==============================================================================
#==============================================================================
'''Auto-Encoding:'''
num_neurons = 2
regulariser = keras.regularizers.l2(0.01)
#input placeholder:
stock_in = Input(shape=(features,))
#encoded representation:
encoded = layers.Dense(num_neurons, activation="relu",
kernel_regularizer = regulariser
)(stock_in)
#decoded representation:
decoded = layers.Dense(features, activation = "linear",
kernel_regularizer = regulariser
)(encoded)
autoencoder = Model(stock_in, decoded)
encoder = Model(stock_in, encoded)
encoded_input = Input(shape=(num_neurons,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='sgd', loss='mean_squared_error')
'''
autoencoder.fit(X_train,X_train, epochs = 500, verbose = 1)
encoded_imgs = encoder.predict(X_test)
decoded_imgs = decoder.predict(encoded_imgs)
encoded_Train = encoder.predict(X_train)
encoded_Test = encoder.predict(X_test)
'''
#==============================================================================
#==============================================================================
'''auto-encoding the folds:'''
Epochs = 100
#fold 1:
autoencoder.fit(fold1_train,fold1_train, epochs = Epochs, verbose = 1)
enc_fold1_train = encoder.predict(fold1_train)
enc_fold1_test = encoder.predict(fold1_test)
#fold 2:
autoencoder.fit(fold2_train,fold2_train, epochs = Epochs, verbose = 1)
enc_fold2_train = encoder.predict(fold2_train)
enc_fold2_test = encoder.predict(fold2_test)
#fold 3:
autoencoder.fit(fold3_train,fold3_train, epochs = Epochs, verbose = 1)
enc_fold3_train = encoder.predict(fold3_train)
enc_fold3_test = encoder.predict(fold3_test)
#fold 4:
autoencoder.fit(fold4_train,fold4_train, epochs = Epochs, verbose = 1)
enc_fold4_train = encoder.predict(fold4_train)
enc_fold4_test = encoder.predict(fold4_test)
#fold 5:
autoencoder.fit(fold5_train,fold5_train, epochs = Epochs, verbose = 1)
enc_fold5_train = encoder.predict(fold5_train)
enc_fold5_test = encoder.predict(fold5_test)
#==============================================================================
#==============================================================================
def SVR_Train(X,y,kern,eps, gam, c):
piped_svr = Pipeline([#('scaler', StandardScaler()),
('svr', SVR(kernel=kern,epsilon=eps,gamma=gam,
C=c)),
])
return piped_svr.fit(X,y)
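# Sketch (illustrative hyper-parameters): train on the encoded features,
# then predict on a held-out fold.
#   model = SVR_Train(enc_fold1_train, fold1_train_y, 'rbf', eps=0.1, gam=0.04, c=100)
#   yhat = model.predict(enc_fold1_test)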
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
def trend(prediction):
trend_vector = []
for i in range(len(prediction)-1):
if prediction[i+1] >= prediction[i]:
trend_vector.append(1)
else:
trend_vector.append(-1)
return trend_vector
def hits(actual, pred):
hits = 0
for i in range(len(actual)):
if actual[i] == pred[i]:
hits +=1
return hits
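# Worked example: trend([1.0, 1.2, 1.1]) -> [1, -1] (up, then down), and
# hits([1, -1], [1, 1]) -> 1 since only the first direction matches.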
#==============================================================================
#==============================================================================
eps = 0.1
sig_squared = 25
gam = 1/sig_squared
c = 100
#fold 1:
svr_fold1 = SVR_Train(enc_fold1_train,fold1_train_y,'rbf',eps,gam,c)
fold1_predict = svr_fold1.predict(enc_fold1_test)
fold1_act_trend = trend(fold1_test_y)
fold1_pred_trend = trend(fold1_predict)
fold1_hits = hits(fold1_act_trend,fold1_pred_trend)
fold1_perc = fold1_hits / len(fold1_act_trend)
#fold 2:
svr_fold2 = SVR_Train(enc_fold2_train,fold2_train_y,'rbf',eps,gam,c)
fold2_predict = svr_fold2.predict(enc_fold2_test)
fold2_act_trend = trend(fold2_test_y)
fold2_pred_trend = trend(fold2_predict)
fold2_hits = hits(fold2_act_trend,fold2_pred_trend)
fold2_perc = fold2_hits / len(fold2_act_trend)
#fold 3:
svr_fold3 = SVR_Train(enc_fold3_train,fold3_train_y,'rbf',eps,gam,c)
fold3_predict = svr_fold3.predict(enc_fold3_test)
fold3_act_trend = trend(fold3_test_y)
fold3_pred_trend = trend(fold3_predict)
fold3_hits = hits(fold3_act_trend,fold3_pred_trend)
fold3_perc = fold3_hits / len(fold3_act_trend)
#fold 4:
svr_fold4 = SVR_Train(enc_fold4_train,fold4_train_y,'rbf',eps,gam,c)
fold4_predict = svr_fold4.predict(enc_fold4_test)
fold4_act_trend = trend(fold4_test_y)
fold4_pred_trend = trend(fold4_predict)
fold4_hits = hits(fold4_act_trend,fold4_pred_trend)
fold4_perc = fold4_hits / len(fold4_act_trend)
#fold 5:
svr_fold5 = SVR_Train(enc_fold5_train,fold5_train_y,'rbf',eps,gam,c)
fold5_predict = svr_fold5.predict(enc_fold5_test)
fold5_act_trend = trend(fold5_test_y)
fold5_pred_trend = trend(fold5_predict)
fold5_hits = hits(fold5_act_trend,fold5_pred_trend)
fold5_perc = fold5_hits / len(fold5_act_trend)
plt.plot(fold5_predict)
plt.plot(fold5_test_y)
def mse(act,pred):
errr = 0
for i in range(len(act)):
errr += (act[i]-pred[i])**2
errr = errr/len(act)
return errr
def mae(act,pred):
errr = 0
for i in range(len(act)):
error = act[i] - pred[i]
if error > 0:
errr+=error
else:
errr+= (-error)
errr = errr/len(act)
return errr
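# Equivalent vectorized one-liners (sketch, kept commented out so the script's
# behavior is unchanged):
#   mse_v = np.mean((np.asarray(act) - np.asarray(pred))**2)
#   mae_v = np.mean(np.abs(np.asarray(act) - np.asarray(pred)))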
mse_5 = mse(np.array(fold5_test_y),fold5_predict)
mse_4 = mse(np.array(fold4_test_y),fold4_predict)
mse_3 = mse(np.array(fold3_test_y),fold3_predict)
mse_2 = mse(np.array(fold2_test_y),fold2_predict)
mse_1 = mse(np.array(fold1_test_y),fold1_predict)
mae_5 = mae(np.array(fold5_test_y),fold5_predict)
mae_4 = mae(np.array(fold4_test_y),fold4_predict)
# NOTE: the source was truncated here; the remaining folds complete the
# pattern above (assumption):
mae_3 = mae(np.array(fold3_test_y),fold3_predict)
mae_2 = mae(np.array(fold2_test_y),fold2_predict)
mae_1 = mae(np.array(fold1_test_y),fold1_predict)
#!/usr/bin/env python
"""
This module provides high-level classes with simplified APIs for
conducting different experiments.
"""
import os
import pathlib
import bluesky
import ophyd
import epics
import databroker
import numpy as np
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.suspenders import SuspendFloor
from bluesky.simulators import summarize_plan
from time import sleep
from .devices.beamline import Beam, SimBeam
from .devices.beamline import FastShutter
from .devices.motors import StageAero, SimStageAero
from .devices.motors import EnsemblePSOFlyDevice
from .devices.detectors import PointGreyDetector, DexelaDetector, SimDetector
from .util import dict_to_msg
from .util import load_config
from .util import is_light_on
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
class Experiment:
"""Generic expriment handler"""
def __init__(self, mode='debug'):
self.RE = bluesky.RunEngine({})
# self.db = databroker.Broker.named("mongodb_config")
# self.RE.subscribe(self.db.insert)
self.RE.subscribe(BestEffortCallback())
self._mode = mode
self.shutter = Experiment.get_main_shutter(mode)
self.suspend_shutter = SuspendFloor(self.shutter.pss_state, 1)
# TODO:
# create fast shutter here
# for 1id the fast shutter PV is
# FS1PV: 1id:softGlue:AND-1_IN1_Signal
# FS2PV: 1id:softGlue:AND-2_IN1_Signal
# There are also mask PVs to enable and disable FS control with different signals
# FS1maskPV: 1id:softGlue:AND-1_IN2_Signal
# FS2maskPV: 1id:softGlue:AND-2_IN2_Signal
# The mask PV is used to trigger the FS with other hardware triggers
# mostly along with the detector, so that beam is off when not acquiring
# i.e. control with sweep: epics_put(sprintf("%s",FS_control_PV), "Sweep", SGtime)
# still not entirely sure how this works, maybe bluesky already has this trigger mode?
# monitor APS current
# NOTE: only start monitoring APS ring status during production
if mode.lower() in ['production']:
from apstools.devices import ApsMachineParametersDevice
self._aps = ApsMachineParametersDevice(name="APS")
self.suspend_APS_current = SuspendFloor(self._aps.current, 2, resume_thresh=10)
self.RE.install_suspender(self.suspend_APS_current)
@staticmethod
def get_main_shutter(mode):
"""
        return
            simulated shutter when mode in [dryrun, debug]
            actual shutter when mode is production
        TODO:
            need to update with actual PV for 6-ID-D
"""
if mode.lower() in ['debug', 'dryrun']:
from apstools.devices import SimulatedApsPssShutterWithStatus
A_shutter = SimulatedApsPssShutterWithStatus(name="A_shutter")
elif mode.lower() == 'production':
from apstools.devices import ApsPssShutterWithStatus
A_shutter = ApsPssShutterWithStatus(
"PA:01ID", # This is for 1ID
"PA:01ID:STA_A_FES_OPEN_PL", # This is for 1ID
name="A_shutter",
)
else:
raise ValueError(f"Invalide mode, {mode}")
return A_shutter
@staticmethod
def get_fast_shutter(mode):
"""Return fast shutter"""
# TODO: implement the fast shutter, then instantiate it here
pass
class Tomography(Experiment):
"""Tomography experiment control for 6-ID-D."""
def __init__(self, mode='debug'):
super(Tomography, self).__init__(mode)
self._mode = mode
# instantiate device
self.tomo_stage = Tomography.get_tomostage(self._mode)
self.fly_control = Tomography.get_flycontrol(self._mode)
self.tomo_det = Tomography.get_detector(self._mode) # detector is initialized here
self.tomo_beam = Tomography.get_tomobeam(self._mode)
# TODO:
# we need to do some initialization with Beam based on
# a cached/lookup table
@property
def mode(self):
return f"current mode is {self._mode}, available options are ['debug', 'dryrun', 'production']"
@mode.setter
def mode(self, newmode):
self._mode = newmode
self.shutter = Experiment.get_main_shutter(self._mode)
self.tomo_stage = Tomography.get_tomostage(self._mode)
self.fly_control = Tomography.get_flycontrol(self._mode)
self.tomo_det = Tomography.get_detector(self._mode)
self.tomo_beam = Tomography.get_tomobeam(self._mode)
def check(self, cfg):
"""Return user input before run"""
cfg = load_config(cfg) if type(cfg) != dict else cfg
print(f"Tomo configuration:\n{dict_to_msg(cfg['tomo'])}")
print(f"Output:\n{dict_to_msg(cfg['output'])}")
def __repr__(self):
"""Return summary of the current experiment status"""
"""
beam = self.tomo_beam
stage = self.tomo_stage
# get the current beamline optics
# TODO: need to figure out how to get the beam energy
# commented out for Sim testing
_beamline_status = (
f"Beam Size is: {beam.s1.h_size}x{beam.s1.v_size} (HxV) \n"
f"Attenuation is: {beam.att_level} \n"
f"Beam Energy is: {beam.energy} \n"
f"Focus Lenses Positions: l1y @ {beam.l1.l1y} \n"
f" l2y @ {beam.l2.l2y} \n"
f" l3y @ {beam.l3.l3y} \n"
f" l4y @ {beam.l4.l4y} \n"
)
_status_msg = (
(f"Here is the current beamline status:\n") +
_beamline_status +
(f"\nHere are the current motor positions:\n") +
dict_to_msg(stage.position_cached) +
(f"\nHere is the current experiment configuration:\n")
# dict_to_msg(cfg['tomo']) +
# (f"\nHere are the file output info:\n") +
# dict_to_msg(cfg['output'])
)
return _status_msg
"""
pass
# TODO:
# verbose string representation of the experiment and beamline
# status as a dictionary -> yaml
def calibration(self):
"""Perform beamline calibration"""
# TODO:
# Still not clear how calibration can be done automatically, but
# let's keep a function here as a place holder
# Check out this auto alignment to see if some functions can be used here
# https://github.com/AdvancedPhotonSource/auto_sample_alignment.git
# Per conversation with Peter, This package can return the same location on the pin
        # according to the images. However, they are requesting more features like determining
        # the slit position and size.
        # Jun and Peter will test this code during the first week of October; let's wait for their feedback.
pass
@staticmethod
def get_tomobeam(mode):
"""return Tomobeam based on given mode"""
if mode.lower() in ['dryrun', 'production']:
beam = Beam()
elif mode.lower() == 'debug':
# NOTE:
# This is a place holder for maybe additional control of the beam
# simulated tomobeam from the virtual beamline
# dumped all the simulated beam control to m16
beam = SimBeam()
else:
raise ValueError(f"Invalide mode -> {mode}")
return beam
@staticmethod
def get_tomostage(mode):
"""return tomostage based on given mode"""
if mode.lower() in ['dryrun', 'production']:
tomostage = StageAero(name='tomostage')
elif mode.lower() == 'debug':
# NOTE:
# Using SimStageAero from the Virtual Beamline.
tomostage = SimStageAero(name='tomostage')
else:
raise ValueError(f"Invalide mode -> {mode}")
return tomostage
@staticmethod
def get_flycontrol(mode):
# We may have a different version of psofly
if mode.lower() == 'debug':
# TODO: need better simulated motors
from ophyd import sim
psofly = sim.flyer1
elif mode.lower() in ['dryrun', 'production']:
psofly = EnsemblePSOFlyDevice("PV_FLY", name="psofly")
else:
raise ValueError(f"Invalide mode, {mode}")
return psofly
@staticmethod
def get_detector(mode):
det_PV = {
'debug': "6iddSIMDET1:",
'dryrun': "1idPG4:",
'production': "1idPG4:",
}[mode.lower()]
det = {
'debug': SimDetector(det_PV, name='det'),
'dryrun': PointGreyDetector(det_PV, name='det'),
'production': PointGreyDetector(det_PV, name='det'),
}[mode.lower()]
# setup HDF5 layout using a hidden EPICS PV
# -- enumerator type
# -- need to set both write and RBV field
epics.caput(f"{det_PV}cam1:FrameType.ZRST", "/exchange/data_white_pre")
epics.caput(f"{det_PV}cam1:FrameType.ONST", "/exchange/data")
epics.caput(f"{det_PV}cam1:FrameType.TWST", "/exchange/data_white_post")
epics.caput(f"{det_PV}cam1:FrameType.THST", "/exchange/data_dark")
epics.caput(f"{det_PV}cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
epics.caput(f"{det_PV}cam1:FrameType_RBV.ONST", "/exchange/data")
epics.caput(f"{det_PV}cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
epics.caput(f"{det_PV}cam1:FrameType_RBV.THST", "/exchange/data_dark")
# set the attribute file (det.cam) and the layout file (det.hdf1)
# ISSUE:CRITICAL
        # we are encountering a strange issue that prevents the HDF5 plugin
        # from functioning properly.
_current_fp = str(pathlib.Path(__file__).parent.absolute())
_attrib_fp = os.path.join(_current_fp, 'config/PG4_attributes.xml')
_layout_fp = os.path.join(_current_fp, 'config/tomo6idd_layout.xml')
det.cam1.nd_attributes_file.put(_attrib_fp)
det.hdf1.xml_file_name.put(_layout_fp)
# turn off the problematic auto setting in cam1
# NOTE:
        # These settings should have been cached after a successful run. We are just ensuring that
# the correct settings are used for the camera to prevent potential loss of cached settings
# -- related to auto-*
det.cam1.auto_exposure_auto_mode.put(0)
det.cam1.sharpness_auto_mode.put(0)
det.cam1.gain_auto_mode.put(0)
det.cam1.frame_rate_auto_mode.put(0)
# -- prime camera
# NOTE:
        # By default, the file plugins do not know the image dimensions and size, so we need to push
        # one frame through to let the file plugins know what to expect
# ---- get camera ready to keep taking image
det.cam1.acquire_time.put(0.001)
det.cam1.acquire_period.put(0.005)
det.cam1.image_mode.put('Continuous')
# ---- get tiff1 primed
det.tiff1.auto_increment.put(0)
det.tiff1.capture.put(0)
det.tiff1.enable.put(1)
det.tiff1.file_name.put('prime_my_tiff')
det.cam1.acquire.put(1)
sleep(0.01)
det.cam1.acquire.put(0)
det.tiff1.enable.put(0)
det.tiff1.auto_increment.put(1)
# ---- get hdf1 primed
det.hdf1.auto_increment.put(0)
det.hdf1.capture.put(0)
det.hdf1.enable.put(1)
det.hdf1.file_name.put('prime_my_hdf')
det.cam1.acquire.put(1)
sleep(0.01)
det.cam1.acquire.put(0)
det.hdf1.enable.put(0)
det.hdf1.auto_increment.put(1)
        # ---- turn on auto save (superseded by the enable toggles above, so we are safe)
det.tiff1.auto_save.put(1)
det.hdf1.auto_save.put(1)
# -- realted to proc1
det.proc1.filter_callbacks.put(1) # 0 Every array; 1 Array N only (useful for taking bg)
det.proc1.auto_reset_filter.put(1) # ALWAYS auto reset filter
# -- ?? more to come
# -- enter stand-by mode
det.cam1.image_mode.put('Multiple')
return det
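    # NOTE:
    # The tiff1/hdf1 priming above follows one fixed pattern. A minimal helper
    # sketch that captures it is shown below; `prime_file_plugin` is a
    # hypothetical convenience function (not part of this module or of ophyd),
    # included only to document the pattern:
    #
    #   def prime_file_plugin(cam, plugin, fname='prime_me'):
    #       """Push one frame through `plugin` so it learns the image shape."""
    #       plugin.auto_increment.put(0)
    #       plugin.capture.put(0)
    #       plugin.enable.put(1)
    #       plugin.file_name.put(fname)
    #       cam.acquire.put(1)
    #       sleep(0.01)
    #       cam.acquire.put(0)
    #       plugin.enable.put(0)
    #       plugin.auto_increment.put(1)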
# --------------------------------------------- #
# ----- pre-defined scan plans starts from here #
# --------------------------------------------- #
def collect_white_field(self, cfg_tomo, atfront=True):
"""
Collect white/flat field images by moving the sample out of the FOV
"""
# unpack devices
det = self.tomo_det
tomostage = self.tomo_stage
# move sample out of the way
# TODO:
# the details and fields need to be updated for 6-ID-D
_x = cfg_tomo['fronte_white_kx'] if atfront else cfg_tomo['back_white_kx']
_z = cfg_tomo['fronte_white_kz'] if atfront else cfg_tomo['back_white_kz']
yield from bps.mv(tomostage.kx, _x) #update with correct motor name
yield from bps.mv(tomostage.kz, _z)
# setup detector
# TODO:
# actual implementation need to be for 6-ID-D
# Raw images go through the following plugins:
# PG1 ==> TRANS1 ==> PROC1 ==> TIFF1
# || ||
# ==> IMAGE1 ======> HDF1
yield from bps.mv(det.proc1.nd_array_port, 'TRANS1')
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.trans1.enable, 1)
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.enable_filter, 1)
yield from bps.mv(det.proc1.filter_type, 'Average')
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam1.trigger_mode, "Internal")
yield from bps.mv(det.cam1.image_mode, "Multiple")
yield from bps.mv(det.cam1.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])
yield from bps.trigger_and_read([det])
# move sample back to FOV
# NOTE:
        #   not sure if this will work or not...
# TODO:
# need to update all the motor names according to StageAero
yield from bps.mv(tomostage.kx, cfg_tomo['initial_kx'])
yield from bps.mv(tomostage.kz, cfg_tomo['initial_kz'])
def collect_dark_field(self, cfg_tomo):
"""
        Collect dark field images by closing the shutter
"""
# TODO:
# Need to toggle Fast shutter
det = self.tomo_det
# Raw images go through the following plugins:
# PG1 ==> TRANS1 ==> PROC1 ==> TIFF1
# || ||
# ==> IMAGE1 ======> HDF1
yield from bps.mv(det.proc1.nd_array_port, 'TRANS1')
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.trans1.enable, 1)
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.enable_filter, 1)
yield from bps.mv(det.proc1.filter_type, 'Average')
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam1.trigger_mode, "Internal")
yield from bps.mv(det.cam1.image_mode, "Multiple")
yield from bps.mv(det.cam1.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])
yield from bps.trigger_and_read([det])
def step_scan(self, cfg_tomo):
"""
Collect projections with step motion
"""
# unpack devices
det = self.tomo_det
tomostage = self.tomo_stage
# TODO:
# the fields need to be updated for 6-ID-D
# Raw images go through the following plugins:
# PG1 ==> TRANS1 ==> PROC1 ==> TIFF1
# || ||
# ==> IMAGE1 ======> HDF1
yield from bps.mv(det.proc1.nd_array_port, 'TRANS1')
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.trans1.enable, 1)
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.enable_filter, 1)
yield from bps.mv(det.proc1.filter_type, 'Average')
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam1.num_images, cfg_tomo['n_frames'])
angs = np.arange(
cfg_tomo['omega_start'],
cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,
cfg_tomo['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(tomostage.rot, ang)
yield from bps.trigger_and_read([det])
def fly_scan(self, cfg_tomo):
"""
Collect projections with fly motion
"""
det = self.tomo_det
psofly = self.fly_control
# TODO:
# The fields need to be updated for 6-ID-D
# Raw images go through the following plugins:
# PG1 ==> TRANS1 ==> PROC1 ==> TIFF1
# || ||
# ==> IMAGE1 ======> HDF1
# TODO:
yield from bps.mv(det.proc1.nd_array_port, 'TRANS1')
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.trans1.enable, 1)
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.enable_filter, 1)
yield from bps.mv(det.proc1.filter_type, 'Average')
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam1.num_images, cfg_tomo['n_frames'])
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_tomo['omega_start'],
psofly.end, cfg_tomo['omega_end'],
psofly.scan_delta, abs(cfg_tomo['omega_step']),
psofly.slew_speed, cfg_tomo['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam1.num_images, cfg_tomo['n_projections'],
det.cam1.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def tomo_scan(self, cfg):
"""
Tomography scan plan based on given configuration
"""
# unpack devices
det = self.tomo_det
tomostage = self.tomo_stage
# TODO: commented for Sim test
shutter = self.shutter
shutter_suspender = self.suspend_shutter
beam = self.tomo_beam
# load experiment configurations
cfg = load_config(cfg) if type(cfg) != dict else cfg
# TODO:
# the following needs to be updated for 6-ID-D
# update the cached motor position in the dict in case exp goes wrong
_cached_position = self.tomo_stage.cache_position()
#########################
## step 0: preparation ##
#########################
acquire_time = cfg['tomo']['acquire_time']
acquire_period = cfg['tomo']['acquire_period']
n_white = cfg['tomo']['n_white']
n_dark = cfg['tomo']['n_dark']
angs = np.arange(
cfg['tomo']['omega_start'],
cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,
cfg['tomo']['omega_step'],
)
n_projections = len(angs)
cfg['tomo']['n_projections'] = n_projections
cfg['tomo']['total_images'] = n_white + n_projections + n_white + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# consider adding an extra step to:
# Perform energy calibration, set intended attenuation
# set the lenses, change the intended slit size
# prime the control of FS
#####################################
## step 0.1: check beam parameters ##
#####################################
# set slit sizes
# These are the 1-ID-E controls
# epics_put("1ide1:Kohzu_E_upHsize.VAL", ($1), 10) ##
# epics_put("1ide1:Kohzu_E_dnHsize.VAL", (($1)+0.1), 10) ##
# epics_put("1ide1:Kohzu_E_upVsize.VAL", ($2), 10) ## VERT SIZE
# epics_put("1ide1:Kohzu_E_dnVsize.VAL", ($2)+0.1, 10) ##
# _beam_h_size = cfg['tomo']['beamsize_h']
# _beam_v_size = cfg['tomo']['beamsize_v']
# yield from bps.mv(beam.s1.h_size, _beam_h_size )
# yield from bps.mv(beam.s1.v_size, _beam_v_size )
# yield from bps.mv(beam.s2.h_size, _beam_h_size + 0.1 ) # add 0.1 following 1ID convention
# yield from bps.mv(beam.s2.v_size, _beam_v_size + 0.1 ) # to safe guard the beam?
if self._mode.lower() in ['dryrun', 'production']:
# set attenuation
_attenuation = cfg['tomo']['attenuation']
yield from bps.mv(beam.att._motor, _attenuation)
# check energy
# need to be clear what we want to do here
_energy_foil = cfg['tomo']['energyfoil']
yield from bps.mv(beam.foil._motor, _energy_foil) # need to complete this part in beamline.py
# TODO:
# Instead of setting the beam optics, just check the current setup
        # and print it out for user information.
# current beam size
# TODO:
# use softIOC to provide shortcut to resize slits
# cfg['tomo']['beamsize_h'] = beam.s1.h_size
# cfg['tomo']['beamsize_v'] = beam.s1.v_size
# current lenses (proposed...)
            cfg['tomo']['focus_beam'] = beam.l1.l1y.position == 10  # to see if focusing is used
# current attenuation
cfg['tomo']['attenuation'] = beam.att._motor.get()
# check energy? may not be necessary.
# TODO:
# set up FS controls
# decide what to do with the focus lenses
# calculate slew speed for fly scan
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['tomo']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['tomo']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = tomostage.kx.position, tomostage.kz.position
dfx, dfz = cfg['tomo']['sample_out_position']['kx'], cfg['tomo']['sample_out_position']['kz']
rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['tomo']['initial_kx'] = x0
cfg['tomo']['initial_kz'] = z0
cfg['tomo']['fronte_white_kx'] = x0 + dfx
cfg['tomo']['fronte_white_kz'] = z0 + dfz
cfg['tomo']['back_white_kx'] = x0 + dbx
cfg['tomo']['back_white_kz'] = z0 + dbz
###############################################
## step 0.9: print out the cfg for user info ##
###############################################
self.check(cfg)
# NOTE: file path cannot be used with bps.mv, leading to a timeout error
for me in [det.tiff1, det.hdf1]:
me.file_path.put(fp)
@bpp.stage_decorator([det])
@bpp.run_decorator()
def scan_closure():
# TODO:
# Somewhere we need to check the light status
# open shutter for beam
if self._mode.lower() in ['production']:
yield from bps.mv(shutter, 'open')
yield from bps.install_suspender(shutter_suspender)
# config output
if self._mode.lower() in ['dryrun','production']:
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_name, fn)
# yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_write_mode, 2) # 1: capture, 2: stream
yield from bps.mv(me.num_capture, cfg['tomo']['total_images'])
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
elif self._mode.lower() in ['debug']:
for me in [det.tiff1, det.hdf1]:
# TODO: file path will lead to time out error in Sim test
# yield from bps.mv(me.file_path, '/data')
yield from bps.mv(me.file_name, fn)
yield from bps.mv(me.file_write_mode, 2) # 1: capture, 2: stream
yield from bps.mv(me.auto_increment, 1)
yield from bps.mv(me.num_capture, cfg['tomo']['total_images'])
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# setting acquire_time and acquire_period
yield from bps.mv(det.cam1.acquire_time, acquire_time)
yield from bps.mv(det.cam1.acquire_period, acquire_period)
# collect front white field
yield from bps.mv(det.cam1.frame_type, 0) # for HDF5 dxchange data structure
yield from self.collect_white_field(cfg['tomo'], atfront=True)
# collect projections
yield from bps.mv(det.cam1.frame_type, 1) # for HDF5 dxchange data structure
if cfg['tomo']['type'].lower() == 'step':
# run step_scan
yield from self.step_scan(cfg['tomo'])
elif cfg['tomo']['type'].lower() == 'fly':
yield from self.fly_scan(cfg['tomo'])
else:
raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}")
# collect back white field
yield from bps.mv(det.cam1.frame_type, 2) # for HDF5 dxchange data structure
yield from self.collect_white_field(cfg['tomo'], atfront=False)
# collect back dark field
yield from bps.mv(det.cam1.frame_type, 3) # for HDF5 dxchange data structure
# TODO: no shutter available for Sim testing
if self._mode.lower() in ['dryrun', 'production']:
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close")
yield from self.collect_dark_field(cfg['tomo'])
return (yield from scan_closure())
# summarize_plan with config yml file
def dryrun(self, scan_config):
"""use summarize_plan for quick analysis"""
return summarize_plan(self.tomo_scan(scan_config))
def run(self,scan_config):
"""run tomo_scan with RE"""
return self.RE(self.tomo_scan(scan_config))
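    # ------------------------------------------------------------------ #
    # Usage sketch (illustrative only): the enclosing class name
    # `Tomography` and a YAML config with 'tomo' and 'output' sections
    # (as read by load_config) are assumptions here.
    #
    #   tomo = Tomography(mode='dryrun')
    #   tomo.dryrun('config/tomo_scan.yml')   # summarize_plan sanity check
    #   tomo.mode = 'production'
    #   tomo.run('config/tomo_scan.yml')      # execute with the RunEngine
    # ------------------------------------------------------------------ #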
class NearField(Experiment):
"""NF-HEDM control for 6-ID-D"""
def __init__(self, mode='debug'):
super(NearField, self).__init__(mode)
self._mode = mode
# instantiate device
self.nf_stage = NearField.get_nfstage(self._mode)
self.fly_control = NearField.get_flycontrol(self._mode)
self.nf_det = NearField.get_detector(self._mode)
self.nf_beam = NearField.get_nfbeam(self._mode)
if mode.lower() in ['debug']:
# take an image to prime the tiff1 and hdf1 plugin
self.nf_det.cam1.acquire_time.put(0.001)
self.nf_det.cam1.acquire_period.put(0.005)
self.nf_det.cam1.image_mode.put('Continuous')
            self.nf_det.tiff1.auto_increment.put(0)
            self.nf_det.tiff1.capture.put(0)
            self.nf_det.tiff1.enable.put(1)
            self.nf_det.tiff1.file_name.put('prime_my_tiff')
            self.nf_det.cam1.acquire.put(1)
sleep(0.01)
self.nf_det.cam1.acquire.put(0)
self.nf_det.tiff1.enable.put(0)
self.nf_det.tiff1.auto_increment.put(1)
self.nf_det.hdf1.auto_increment.put(0)
self.nf_det.hdf1.capture.put(0)
self.nf_det.hdf1.enable.put(1)
self.nf_det.hdf1.file_name.put('prime_my_hdf')
self.nf_det.cam1.acquire.put(1)
sleep(0.01)
self.nf_det.cam1.acquire.put(0)
self.nf_det.hdf1.enable.put(0)
self.nf_det.hdf1.auto_increment.put(1)
# set up auto save for tiff and hdf
self.nf_det.tiff1.auto_save.put(1)
self.nf_det.hdf1.auto_save.put(1)
# turn on proc1 filter
self.nf_det.proc1.enable_filter.put(1)
self.nf_det.proc1.auto_reset_filter.put(1)
self.nf_det.proc1.filter_callbacks.put(1)
# 0 for 'Every array'; 1 for 'Every N only'
# TODO:
# we need to do some initialization with Beam based on
# a cached/lookup table
@property
def mode(self):
return f"current mode is {self._mode}, available options are ['debug'. 'dryrun', 'production']"
@mode.setter
def mode(self, newmode):
self._mode = newmode
self.shutter = Experiment.get_main_shutter(self._mode)
self.nf_stage = NearField.get_nfstage(self._mode)
self.fly_control = NearField.get_flycontrol(self._mode)
self.nf_det = NearField.get_detector(self._mode)
self.nf_beam = NearField.get_nfbeam(self._mode)
def check(self, cfg):
"""Return user input before run"""
cfg = load_config(cfg) if type(cfg) != dict else cfg
print(f"NearField configuration:\n{dict_to_msg(cfg['nf'])}")
print(f"Output:\n{dict_to_msg(cfg['output'])}")
def __repr__(self):
"""Return summary of the current experiment status"""
"""
beam = self.nf_beam
stage = self.nf_stage
# get the current beamline optics
# TODO: need to figure out how to get the beam energy
_beamline_status = (
f"Beam Size is: {beam.s1.h_size}x{beam.s1.v_size} (HxV) \n"
f"Attenuation is: {beam.att_level} \n"
f"Beam Energy is: {beam.energy} \n"
f"Focus Lenses Positions: l1y @ {beam.l1.l1y} \n"
f" l2y @ {beam.l2.l2y} \n"
f" l3y @ {beam.l3.l3y} \n"
f" l4y @ {beam.l4.l4y} \n"
)
_status_msg = (
(f"Here is the current beamline status:\n") +
_beamline_status +
(f"\nHere are the current motor positions:\n") +
dict_to_msg(stage.position_cached) +
(f"\nHere is the current experiment configuration:\n")
# dict_to_msg(cfg['nf']) +
# (f"\nHere are the file output info:\n") +
# dict_to_msg(cfg['output'])
)
return _status_msg
"""
# TODO:
# verbose string representation of the experiment and beamline
# status as a dictionary -> yaml
def calibration(self):
"""Perform beamline calibration"""
# TODO:
# Propose not to do calibration here
        # Calibration should be done in a separate module; limit the experiment to data collection.
# Should log calibration in RunEngine as well.
pass
@staticmethod
def get_nfbeam(mode):
"""return NFbeam based on given mode"""
if mode.lower() in ['dryrun', 'production']:
beam = Beam()
elif mode.lower() == 'debug':
# NOTE:
# This is a place holder for maybe additional control of the beam
# simulated tomobeam from the virtual beamline
# dumped all the simulated beam control to m16
beam = SimBeam()
else:
raise ValueError(f"Invalide mode -> {mode}")
return beam
@staticmethod
def get_nfstage(mode):
"""return nfstage based on given mode"""
if mode.lower() in ['dryrun', 'production']:
nfstage = StageAero(name='nfstage')
elif mode.lower() == 'debug':
nfstage = SimStageAero(name='nfstage')
else:
raise ValueError(f"Invalide mode -> {mode}")
return nfstage
@staticmethod
def get_flycontrol(mode):
if mode.lower() == 'debug':
# TODO: need better simulated motors
from ophyd import sim
psofly = sim.flyer1
elif mode.lower() in ['dryrun', 'production']:
psofly = EnsemblePSOFlyDevice("PV_FLY", name="psofly")
else:
raise ValueError(f"Invalide mode, {mode}")
return psofly
@staticmethod
def get_detector(mode):
# TODO: implement real PVs
if mode.lower() == 'debug':
det = SimDetector("6iddSIMDET1:", name='det')
epics.caput("6iddSIMDET1:cam1:FrameType.ZRST", "/exchange/data_white_pre")
epics.caput("6iddSIMDET1:cam1:FrameType.ONST", "/exchange/data")
epics.caput("6iddSIMDET1:cam1:FrameType.TWST", "/exchange/data_white_post")
epics.caput("6iddSIMDET1:cam1:FrameType.THST", "/exchange/data_dark")
# ophyd need this configuration
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.ONST", "/exchange/data")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.THST", "/exchange/data_dark")
# set the layout file for cam
            # TODO: need to update with actual config files for 6-ID-D
# commented out for Sim test
# _current_fp = str(pathlib.Path(__file__).parent.absolute())
# _attrib_fp = os.path.join(_current_fp, 'config/PG2_attributes.xml')
# _layout_fp = os.path.join(_current_fp, 'config/tomo6bma_layout.xml')
# det.cam1.nd_attributes_file.put(_attrib_fp)
# det.hdf1.xml_file_name.put(_layout_fp)
# # turn off the problematic auto setting in cam
# det.cam1.auto_exposure_auto_mode.put(0)
# det.cam1.sharpness_auto_mode.put(0)
# det.cam1.gain_auto_mode.put(0)
# det.cam1.frame_rate_auto_mode.put(0)
elif mode.lower() in ['dryrun', 'production']:
# TODO: Need to make sure this is correct
# change to PG4 for testing
det = PointGreyDetector("1idPG4:", name='det')
# TODO:
# Change the motor PV to the actual motor that moves the detector along z-axis
from ophyd import EpicsMotor
det.cam1.nfposition = EpicsMotor("6idhedm:m41:", name='nfposition')
# check the following page for important information
# https://github.com/BCDA-APS/use_bluesky/blob/master/notebooks/sandbox/images_darks_flats.ipynb
#
epics.caput("1idPG4:cam1:FrameType.ZRST", "/exchange/data_white_pre")
epics.caput("1idPG4:cam1:FrameType.ONST", "/exchange/data")
epics.caput("1idPG4:cam1:FrameType.TWST", "/exchange/data_white_post")
epics.caput("1idPG4:cam1:FrameType.THST", "/exchange/data_dark")
# ophyd need this configuration
epics.caput("1idPG4:cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
epics.caput("1idPG4:cam1:FrameType_RBV.ONST", "/exchange/data")
epics.caput("1idPG4:cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
epics.caput("1idPG4:cam1:FrameType_RBV.THST", "/exchange/data_dark")
# set the layout file for cam
            # TODO: need to update with actual config files for 6-ID-D
_current_fp = str(pathlib.Path(__file__).parent.absolute())
_attrib_fp = os.path.join(_current_fp, 'config/PG2_attributes.xml')
_layout_fp = os.path.join(_current_fp, 'config/tomo6bma_layout.xml')
det.cam1.nd_attributes_file.put(_attrib_fp)
det.hdf1.xml_file_name.put(_layout_fp)
# turn off the problematic auto setting in cam
det.cam1.auto_exposure_auto_mode.put(0)
det.cam1.sharpness_auto_mode.put(0)
det.cam1.gain_auto_mode.put(0)
det.cam1.frame_rate_auto_mode.put(0)
else:
raise ValueError(f"Invalide mode, {mode}")
det.proc1.filter_callbacks.put(1) # 0 Every array; 1 Array N only (useful for taking bg)
det.proc1.auto_reset_filter.put(1) # ALWAYS auto reset filter
return det
############################
## Near Field Calibration ##
############################
# NOT to be used in scan plans
def calibration(self):
"""Image calibration for the two NF z positions"""
det = self.nf_det
# TODO: what needs to be done here
# add the z motor?
# add the beamstp motor?
pass
# ----- pre-defined scan plans starts from here
@bpp.run_decorator()
def fly_scan(self, cfg_nf):
"""
Collect projections with fly motion
"""
det = self.nf_det
psofly = self.fly_control
# TODO:
# Need to set up FS control for the scan
# During fly scan, the FS is always open
# TODO:
# The fields need to be updated for 6-ID-D
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_nf['omega_start'],
psofly.end, cfg_nf['omega_end'],
psofly.scan_delta, abs(cfg_nf['omega_step']),
psofly.slew_speed, cfg_nf['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam1.num_images, cfg_nf['n_projections'],
det.cam1.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def nf_scan(self, cfg):
"""
NearField scan plan based on given configuration
"""
# unpack devices
det = self.nf_det
stage = self.nf_stage
shutter = self.shutter
shutter_suspender = self.suspend_shutter
beam = self.nf_beam
# load experiment configurations
cfg = load_config(cfg) if type(cfg) != dict else cfg
# TODO:
# the following needs to be updated for 6-ID-D
# update the cached motor position in the dict in case exp goes wrong
_cached_position = self.nf_stage.cache_position()
#########################
## step 0: preparation ##
#########################
acquire_time = cfg['nf']['acquire_time']
angs = np.arange(
cfg['nf']['omega_start'],
cfg['nf']['omega_end']+cfg['nf']['omega_step']/2,
cfg['nf']['omega_step'],
)
n_projections = len(angs)
cfg['nf']['n_projections'] = n_projections
cfg['nf']['total_images'] = n_projections
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# consider adding an extra step to:
# Perform energy calibration, set intended attenuation
# set the lenses, change the intended slit size
# prime the control of FS
#############################################
## step 0.1: check and set beam parameters ##
#############################################
# set slit sizes
# These are the 1-ID-E controls
# epics_put("1ide1:Kohzu_E_upHsize.VAL", ($1), 10) ##
# epics_put("1ide1:Kohzu_E_dnHsize.VAL", (($1)+0.1), 10) ##
# epics_put("1ide1:Kohzu_E_upVsize.VAL", ($2), 10) ## VERT SIZE
# epics_put("1ide1:Kohzu_E_dnVsize.VAL", ($2)+0.1, 10) ##
# _beam_h_size = cfg['nf']['beamsize_h']
# _beam_v_size = cfg['nf']['beamsize_v']
# yield from bps.mv(beam.s1.h_size, _beam_h_size )
# yield from bps.mv(beam.s1.v_size, _beam_v_size )
# yield from bps.mv(beam.s2.h_size, _beam_h_size + 0.1 ) # add 0.1 following 1ID convention
# yield from bps.mv(beam.s2.v_size, _beam_v_size + 0.1 ) # to safe guard the beam?
# set attenuation
# _attenuation = cfg['nf']['attenuation']
# yield from bps.mv(beam.att.att_level, _attenuation)
# check energy
# need to be clear what we want to do here
# _energy_foil = cfg['nf']['energyfoil']
# yield from bps.mv(beam.foil, _energy_foil) # need to complete this part in beamline.py
# TODO:
# Instead of setting the beam optics, just check the current setup
        # and print it out for user information.
# current beam size
# cfg['nf']['beamsize_h'] = beam.s1.h_size
# cfg['nf']['beamsize_v'] = beam.s1.v_size
# current lenses (proposed...)
cfg['nf']['focus_beam'] = beam.l1.l1y.position == 10 # to see if focusing is used
# current attenuation
# TODO: commented for Sim testing
# cfg['nf']['attenuation'] = beam.att.att_level
# check energy? may not be necessary.
# TODO:
# set up FS controls
# decide what to do with the focus lenses
#######################################
## calculate slew speed for fly scan ##
#######################################
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
scan_time = (acquire_time+cfg['nf']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['nf']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = stage.kx.position, stage.kz.position
dfx, dfz = cfg['nf']['sample_out_position']['kx'], cfg['nf']['sample_out_position']['kz']
rotang = np.radians(cfg['nf']['omega_end']-cfg['nf']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['nf']['initial_kx'] = x0
cfg['nf']['initial_kz'] = z0
cfg['nf']['fronte_white_kx'] = x0 + dfx
cfg['nf']['fronte_white_kz'] = z0 + dfz
cfg['nf']['back_white_kx'] = x0 + dbx
cfg['nf']['back_white_kz'] = z0 + dbz
## Ideally, we set up the FS control once, then the FS will be controlled with
## intended signals
#########################################
## Function for NF Single Layer Scan ##
#########################################
self.check(cfg)
@bpp.stage_decorator([det])
@bpp.run_decorator()
def scan_singlelayer(self, cfg_nf, _layer_number):
# TODO:
# Somewhere we need to check the light status, or, add a suspender?
# config output
# currently set up to output 1 HDF5 file for each NF layer, including 2 det positions
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, '{}_layer{:06d}'.format(fn, _layer_number))
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, cfg['nf']['total_images']*2) # *2 for two det positions
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# TODO:
# Add FS control here to toggle the FS or Main Shutter?
# collect projections in the current layer in the FIRST det z position
yield from bps.mv(det.cam1.frame_type, 1) # for HDF5 dxchange data structure
yield from bps.mv(det.cam1.nfposition, cfg_nf['detector_z_position']['nf_z1']) # need actual motor
yield from self.fly_scan(cfg['nf'])
# collect projections in the current layer in the SECOND det z position
yield from bps.mv(det.cam1.nfposition, cfg_nf['detector_z_position']['nf_z2']) # need actual motor
yield from self.fly_scan(cfg['nf'])
# TODO:
# Add FS control here to close the FS or Main Shutter?
############################
## Near Field Volume Scan ##
############################
n_layers = cfg['nf']['volume']['n_layers']
ky_start = cfg['nf']['volume']['ky_start']
ky_step = cfg['nf']['volume']['ky_step']
if ky_step == 0:
# To repeat the current layer for n_layer times
# !!! The layer/file number will still increase for this same layer
_scan_positions = np.arange(1, n_layers+1, 1)
for _layer_number_count in _scan_positions:
yield from bps.mv(stage.ky, ky_start)
yield from scan_singlelayer(self, cfg['nf'], _layer_number_count) ### NOT sure if this works!!!
# For regular scans
elif ky_step != 0:
_layer_number_count = 1
_scan_positions = np.arange(ky_start, ky_start+(n_layers-0.5)*ky_step, ky_step)
for _current_scan_ky in _scan_positions:
yield from bps.mv(stage.ky, _current_scan_ky)
yield from scan_singlelayer(self, cfg['nf'], _layer_number_count) ### NOT sure if this works!!!
_layer_number_count += 1
# summarize_plan with config yml file
def dryrun(self, scan_config):
"""use summarize_plan for quick analysis"""
return summarize_plan(self.nf_scan(scan_config))
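    # ------------------------------------------------------------------ #
    # For reference, a config sketch matching the keys consumed by
    # nf_scan above (field names are taken from this file; the values
    # are placeholders, not calibrated numbers):
    #
    #   nf:
    #     acquire_time: 0.05
    #     readout_time: 0.01
    #     omega_start: 0
    #     omega_end: 360
    #     omega_step: 0.25
    #     sample_out_position: {kx: 0.0, kz: -5.0}
    #     detector_z_position: {nf_z1: 5.0, nf_z2: 7.0}
    #     volume: {n_layers: 3, ky_start: 0.0, ky_step: 0.1}
    #   output:
    #     filepath: /data/nf
    #     fileprefix: sample
    #     type: hdf
    # ------------------------------------------------------------------ #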
class FarField(Experiment):
"""FF-HEDM control for 6-ID-D"""
def __init__(self, mode='debug'):
super(FarField, self).__init__(mode)
self._mode = mode
# instantiate device
self.ff_stage = FarField.get_ffstage(self._mode)
self.fly_control = FarField.get_flycontrol(self._mode)
self.ff_det = FarField.get_detector(self._mode)
self.ff_beam = FarField.get_ffbeam(self._mode)
# TODO: Do we need to do this for the farfield?
if mode.lower() in ['debug']:
# take an image to prime the tiff1 and hdf1 plugin
self.ff_det.cam1.acquire_time.put(0.001)
self.ff_det.cam1.acquire_period.put(0.005)
self.ff_det.cam1.image_mode.put('Continuous')
self.ff_det.tiff1.auto_increment.put(0)
self.ff_det.tiff1.capture.put(0)
self.ff_det.tiff1.enable.put(1)
self.ff_det.tiff1.file_name.put('prime_my_tiff')
self.ff_det.cam1.acquire.put(1)
sleep(0.01)
self.ff_det.cam1.acquire.put(0)
self.ff_det.tiff1.enable.put(0)
self.ff_det.tiff1.auto_increment.put(1)
self.ff_det.hdf1.auto_increment.put(0)
self.ff_det.hdf1.capture.put(0)
self.ff_det.hdf1.enable.put(1)
self.ff_det.hdf1.file_name.put('prime_my_hdf')
self.ff_det.cam1.acquire.put(1)
sleep(0.01)
self.ff_det.cam1.acquire.put(0)
self.ff_det.hdf1.enable.put(0)
self.ff_det.hdf1.auto_increment.put(1)
# set up auto save for tiff and hdf
self.ff_det.tiff1.auto_save.put(1)
self.ff_det.hdf1.auto_save.put(1)
# turn on proc1 filter
self.ff_det.proc1.enable_filter.put(1)
self.ff_det.proc1.auto_reset_filter.put(1)
self.ff_det.proc1.filter_callbacks.put(1)
# 0 for 'Every array'; 1 for 'Every N only'
# TODO:
# we need to do some initialization with Beam based on
# a cached/lookup table
@property
def mode(self):
return f"current mode is {self._mode}, available options are ['debug', 'dryrun', 'production']"
@mode.setter
def mode(self, newmode):
self._mode = newmode
self.shutter = Experiment.get_main_shutter(self._mode)
self.ff_stage = FarField.get_ffstage(self._mode)
self.fly_control = FarField.get_flycontrol(self._mode)
self.ff_det = FarField.get_detector(self._mode)
self.ff_beam = FarField.get_ffbeam(self._mode)
def check(self, cfg):
"""Return user input before run"""
cfg = load_config(cfg) if type(cfg) != dict else cfg
print(f"FarField configuration:\n{dict_to_msg(cfg['ff'])}")
print(f"Output:\n{dict_to_msg(cfg['output'])}")
def __repr__(self):
"""Return summary of the current experiment status"""
"""
beam = self.ff_beam
stage = self.ff_stage
# get the current beamline optics
# TODO: need to figure out how to get the beam energy
_beamline_status = (
f"Beam Size is: {beam.s1.h_size}x{beam.s1.v_size} (HxV) \n"
f"Attenuation is: {beam.att_level} \n"
f"Beam Energy is: {beam.energy} \n"
f"Focus Lenses Positions: l1y @ {beam.l1.l1y} \n"
f" l2y @ {beam.l2.l2y} \n"
f" l3y @ {beam.l3.l3y} \n"
f" l4y @ {beam.l4.l4y} \n"
)
_status_msg = (
(f"Here is the current beamline status:\n") +
_beamline_status +
(f"\nHere are the current motor positions:\n") +
dict_to_msg(stage.position_cached) +
(f"\nHere is the current experiment configuration:\n")
# dict_to_msg(cfg['ff']) +
# (f"\nHere are the file output info:\n") +
# dict_to_msg(cfg['output'])
)
        return _status_msg
        """
        # TODO:
        # verbose string representation of the experiment and beamline
        # status as a dictionary -> yaml
def calibration(self):
"""Perform beamline calibration"""
# TODO:
        # Should probably do this in a separate module
# Still not clear how calibration can be done automatically, but
# let's keep a function here as a place holder
# Check out this auto alignment to see if some functions can be used here
# https://github.com/AdvancedPhotonSource/auto_sample_alignment.git
        # Per conversation with Peter, this package can return the same location on the pin
        # according to the images. However, they are requesting more features, like determining
        # the slit position and size.
        # Jun and Peter will test this code during the first week of October; let's wait for their feedback.
pass
@staticmethod
def get_ffbeam(mode):
"""return FFbeam based on given mode"""
if mode.lower() in ['dryrun', 'production']:
beam = Beam()
elif mode.lower() == 'debug':
# NOTE:
# This is a place holder for maybe additional control of the beam
# simulated tomobeam from the virtual beamline
# dumped all the simulated beam control to m16
beam = SimBeam()
else:
raise ValueError(f"Invalide mode -> {mode}")
return beam
@staticmethod
def get_ffstage(mode):
"""return nfstage based on given mode"""
if mode.lower() in ['dryrun', 'production']:
ffstage = StageAero(name='ffstage')
elif mode.lower() == 'debug':
ffstage = SimStageAero(name='ffstage')
else:
raise ValueError(f"Invalide mode -> {mode}")
return ffstage
@staticmethod
def get_flycontrol(mode):
if mode.lower() == 'debug':
# TODO: need better simulated motors
from ophyd import sim
psofly = sim.flyer1
elif mode.lower() in ['dryrun', 'production']:
psofly = EnsemblePSOFlyDevice("PV_FLY", name="psofly")
else:
raise ValueError(f"Invalide mode, {mode}")
return psofly
@staticmethod
def get_detector(mode):
# TODO: implement real PVs
if mode.lower() == 'debug':
det = SimDetector("6iddSIMDET1:", name='det')
epics.caput("6iddSIMDET1:cam1:FrameType.ZRST", "/exchange/data_white_pre")
epics.caput("6iddSIMDET1:cam1:FrameType.ONST", "/exchange/data")
epics.caput("6iddSIMDET1:cam1:FrameType.TWST", "/exchange/data_white_post")
epics.caput("6iddSIMDET1:cam1:FrameType.THST", "/exchange/data_dark")
# ophyd need this configuration
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.ONST", "/exchange/data")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
epics.caput("6iddSIMDET1:cam1:FrameType_RBV.THST", "/exchange/data_dark")
# set the layout file for cam
            # TODO: need to update with actual config files for 6-ID-D
_current_fp = str(pathlib.Path(__file__).parent.absolute())
_attrib_fp = os.path.join(_current_fp, 'config/PG2_attributes.xml')
_layout_fp = os.path.join(_current_fp, 'config/tomo6bma_layout.xml')
det.cam1.nd_attributes_file.put(_attrib_fp)
det.hdf1.xml_file_name.put(_layout_fp)
# turn off the problematic auto setting in cam1
det.cam1.auto_exposure_auto_mode.put(0)
det.cam1.sharpness_auto_mode.put(0)
det.cam1.gain_auto_mode.put(0)
det.cam1.frame_rate_auto_mode.put(0)
elif mode.lower() in ['dryrun', 'production']:
### TODO:
det = PointGreyDetector("1idPG4:", name='det')
# TODO:
# Change the motor PV to the actual motor that moves the detector along z-axis
from ophyd import EpicsMotor
det.cam1.nfposition = EpicsMotor("6idhedm:m41:", name='nfposition')
# check the following page for important information
# https://github.com/BCDA-APS/use_bluesky/blob/master/notebooks/sandbox/images_darks_flats.ipynb
#
epics.caput("1idPG4:cam1:FrameType.ZRST", "/exchange/data_white_pre")
epics.caput("1idPG4:cam1:FrameType.ONST", "/exchange/data")
epics.caput("1idPG4:cam1:FrameType.TWST", "/exchange/data_white_post")
epics.caput("1idPG4:cam1:FrameType.THST", "/exchange/data_dark")
# ophyd need this configuration
epics.caput("1idPG4:cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
epics.caput("1idPG4:cam1:FrameType_RBV.ONST", "/exchange/data")
epics.caput("1idPG4:cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
epics.caput("1idPG4:cam1:FrameType_RBV.THST", "/exchange/data_dark")
# set the layout file for cam
            # TODO: need to update with actual config files for 6-ID-D
_current_fp = str(pathlib.Path(__file__).parent.absolute())
_attrib_fp = os.path.join(_current_fp, 'config/PG2_attributes.xml')
_layout_fp = os.path.join(_current_fp, 'config/tomo6bma_layout.xml')
det.cam1.nd_attributes_file.put(_attrib_fp)
det.hdf1.xml_file_name.put(_layout_fp)
# turn off the problematic auto setting in cam
det.cam1.auto_exposure_auto_mode.put(0)
det.cam1.sharpness_auto_mode.put(0)
det.cam1.gain_auto_mode.put(0)
det.cam1.frame_rate_auto_mode.put(0)
### Need to get have the Dexela configured in Devices.py
# det = DexelaDetector("PV_DET", name='det')
# """
# # check the following page for important information
# # https://github.com/BCDA-APS/use_bluesky/blob/master/notebooks/sandbox/images_darks_flats.ipynb
# #
# epics.caput("PV_DET:cam1:FrameType.ZRST", "/exchange/data_white_pre")
# epics.caput("PV_DET:cam1:FrameType.ONST", "/exchange/data")
# epics.caput("PV_DET:cam1:FrameType.TWST", "/exchange/data_white_post")
# epics.caput("PV_DET:cam1:FrameType.THST", "/exchange/data_dark")
# # ophyd need this configuration
# epics.caput("PV_DET:cam1:FrameType_RBV.ZRST", "/exchange/data_white_pre")
# epics.caput("PV_DET:cam1:FrameType_RBV.ONST", "/exchange/data")
# epics.caput("PV_DET:cam1:FrameType_RBV.TWST", "/exchange/data_white_post")
# epics.caput("PV_DET:cam1:FrameType_RBV.THST", "/exchange/data_dark")
# # set the layout file for cam
            # # TODO: need to update with actual config files for 6-ID-D
# _current_fp = str(pathlib.Path(__file__).parent.absolute())
# _attrib_fp = os.path.join(_current_fp, 'config/PG2_attributes.xml')
# _layout_fp = os.path.join(_current_fp, 'config/tomo6bma_layout.xml')
# det.cam1.nd_attributes_file.put(_attrib_fp)
# det.hdf1.xml_file_name.put(_layout_fp)
# # turn off the problematic auto setting in cam1
# det.cam1.auto_exposure_auto_mode.put(0)
# det.cam1.sharpness_auto_mode.put(0)
# det.cam1.gain_auto_mode.put(0)
# det.cam1.frame_rate_auto_mode.put(0)
# """
else:
raise ValueError(f"Invalide mode, {mode}")
det.proc1.filter_callbacks.put(1) # 0 Every array; 1 Array N only (useful for taking bg)
det.proc1.auto_reset_filter.put(1) # ALWAYS auto reset filter
return det
###########################
## Far Field Calibration ##
###########################
# NOT to be used in scan plans
def calibration(self):
"""Far field calibration for detectors"""
det = self.ff_det
# TODO:
# add the z motor?
# not sure what we need here
pass
# ----- pre-defined scan plans starts from here
@bpp.run_decorator()
def collect_dark_field(self, cfg_ff):
"""
        Collect dark field images by closing the shutter
"""
# TODO:
# Need to toggle Fast shutter, or main??
det = self.ff_det
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_ff['n_frames'])
yield from bps.mv(det.cam1.trigger_mode, "Internal")
yield from bps.mv(det.cam1.image_mode, "Multiple")
yield from bps.mv(det.cam1.num_images, cfg_ff['n_frames']*cfg_ff['n_dark'])
yield from bps.trigger_and_read([det])
@bpp.run_decorator()
def step_scan(self, cfg_ff):
"""
Collect projections with step motion
"""
# unpack devices
det = self.ff_det
ffstage = self.ff_stage
# TODO:
# the fields need to be updated for 6-ID-D
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_ff['n_frames'])
angs = np.arange(
cfg_ff['omega_start'],
cfg_ff['omega_end']+cfg_ff['omega_step']/2,
cfg_ff['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(ffstage.rot, ang)
            yield from bps.trigger_and_read([det])  # trigger_and_read expects an iterable of devices, hence [det]
@bpp.run_decorator()
def fly_scan(self, cfg_ff):
"""
Collect projections with fly motion
"""
det = self.ff_det
psofly = self.fly_control
# TODO:
# Need to set up FS control for the scan
# During fly scan, the FS is always open
# For step scan, FS is triggered along with the detector (not always)
# TODO:
# The fields need to be updated for 6-ID-D FF-HEDM
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_ff['omega_start'],
psofly.end, cfg_ff['omega_end'],
psofly.scan_delta, abs(cfg_ff['omega_step']),
psofly.slew_speed, cfg_ff['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam1.num_images, cfg_ff['n_projections'],
det.cam1.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def ff_scan(self, cfg):
"""
FarField scan plan based on given configuration
"""
# unpack devices
det = self.ff_det
stage = self.ff_stage
shutter = self.shutter
shutter_suspender = self.suspend_shutter
beam = self.ff_beam
# load experiment configurations
cfg = load_config(cfg) if type(cfg) != dict else cfg
# TODO:
# the following needs to be updated for 6-ID-D
# update the cached motor position in the dict in case exp goes wrong
_cached_position = self.ff_stage.cache_position()
#########################
## step 0: preparation ##
#########################
acquire_time = cfg['ff']['acquire_time']
n_dark = cfg['ff']['n_dark']
angs = np.arange(
cfg['ff']['omega_start'],
            cfg['ff']['omega_end']+cfg['ff']['omega_step']/2,
cfg['ff']['omega_step'],
)
n_projections = len(angs)
cfg['ff']['n_projections'] = n_projections
cfg['ff']['total_images'] = n_dark + n_projections + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# consider adding an extra step to:
# Perform energy calibration, set intended attenuation
# set the lenses, change the intended slit size
# prime the control of FS
#############################################
## step 0.1: check and set beam parameters ##
#############################################
# set slit sizes
# These are the 1-ID-E controls
# epics_put("1ide1:Kohzu_E_upHsize.VAL", ($1), 10) ##
# epics_put("1ide1:Kohzu_E_dnHsize.VAL", (($1)+0.1), 10) ##
# epics_put("1ide1:Kohzu_E_upVsize.VAL", ($2), 10) ## VERT SIZE
# # epics_put("1ide1:Kohzu_E_dnVsize.VAL", ($2)+0.1, 10) ##
# _beam_h_size = cfg['ff']['beamsize_h']
# _beam_v_size = cfg['ff']['beamsize_v']
# yield from bps.mv(beam.s1.h_size, _beam_h_size )
# yield from bps.mv(beam.s1.v_size, _beam_v_size )
# yield from bps.mv(beam.s2.h_size, _beam_h_size + 0.1 ) # add 0.1 following 1ID convention
# yield from bps.mv(beam.s2.v_size, _beam_v_size + 0.1 ) # to safe guard the beam?
# # set attenuation
# _attenuation = cfg['ff']['attenuation']
# yield from bps.mv(beam.att.att_level, _attenuation)
# # check energy
# # need to be clear what we want to do here
# _energy_foil = cfg['nf']['energyfoil']
# yield from bps.mv(beam.foil, _energy_foil) # need to complete this part in beamline.py
# TODO:
# Instead of setting the beam optics, just check the current setup
        # and print it out for user information.
# current beam size
# cfg['ff']['beamsize_h'] = beam.s1.h_size
# cfg['ff']['beamsize_v'] = beam.s1.v_size
# current lenses (proposed...)
cfg['ff']['focus_beam'] = beam.l1.l1y.position == 10 # to see if focusing is used
# current attenuation
# TODO: att_level commented out for Sim test
# cfg['ff']['attenuation'] = beam.att.att_level
# check energy? may not be necessary.
# TODO:
# set up FS controls
# decide what to do with the focus lenses
#######################################
## calculate slew speed for fly scan ##
#######################################
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['ff']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['ff']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['ff']['slew_speed'] = slew_speed
# TODO:
# If this is not used during scans, consider moving this to corresponding part
# need to make sure that the sample out position is the same for both front and back
x0, z0 = stage.kx.position, stage.kz.position
        dfx, dfz = cfg['ff']['sample_out_position']['kx'], cfg['ff']['sample_out_position']['kz']
        rotang = np.radians(cfg['ff']['omega_end']-cfg['ff']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['ff']['initial_kx'] = x0
cfg['ff']['initial_kz'] = z0
cfg['ff']['fronte_white_kx'] = x0 + dfx
cfg['ff']['fronte_white_kz'] = z0 + dfz
cfg['ff']['back_white_kx'] = x0 + dbx
cfg['ff']['back_white_kz'] = z0 + dbz
## Ideally, we set up the FS control once, then the FS will be controlled with
## intended signals
#########################################
## Function for FF Single Layer Scan ##
#########################################
self.check(cfg)
@bpp.stage_decorator([det])
@bpp.run_decorator()
def scan_singlelayer(self, cfg_ff, _layer_number):
# TODO:
# Somewhere we need to check the light status, or, add a suspender?
# config output
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, '{}_layer{:06d}'.format(fn, _layer_number))
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, cfg['ff']['total_images'])
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# TODO:
# Add FS control here to toggle the FS or Main Shutter?
# collect front dark field
yield from bps.mv(det.cam1.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close") # let's discuss which shutter to use here
yield from self.collect_dark_field(cfg['ff'])
### NOTE: the main shutter may be closed after dark field!!!
# collect projections in the current layer in the det z position
# Let's discussed if we want det z control during scans
yield from bps.mv(det.cam1.frame_type, 1) # for HDF5 dxchange data structure
if cfg['ff']['type'].lower() == 'step':
yield from self.step_scan(cfg['ff'])
elif cfg['ff']['type'].lower() == 'fly':
# yield from bps.mv(det.cam1.position, cfg_ff['detector_z_position']['ff_z1']) # need actual motor
yield from self.fly_scan(cfg['ff'])
else:
raise ValueError(f"Unsupported scan type: {cfg['ff']['type']}")
# collect back dark field
yield from bps.mv(det.cam1.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close") # let's discuss which shutter to use here
yield from self.collect_dark_field(cfg['ff'])
# TODO:
# Add FS control here to close the FS or Main Shutter?
###########################
## Far Field Volume Scan ##
###########################
n_layers = cfg['ff']['volume']['n_layers']
ky_start = cfg['ff']['volume']['ky_start']
ky_step = cfg['ff']['volume']['ky_step']
if ky_step == 0:
# To repeat the current layer for n_layer times
# !!! The layer/file number will still increase for this same layer
_scan_positions = np.arange(1, n_layers+1, 1)
for _layer_number_count in _scan_positions:
yield from bps.mv(stage.ky, ky_start)
yield from scan_singlelayer(self, cfg['ff'], _layer_number_count) ### NOT sure if this works!!!
# For regular scans
elif ky_step != 0:
_layer_number_count = 1
            _scan_positions = np.arange(ky_start, ky_start+(n_layers-0.5)*ky_step, ky_step)
            for _current_scan_ky in _scan_positions:
                yield from bps.mv(stage.ky, _current_scan_ky)
                yield from scan_singlelayer(self, cfg['ff'], _layer_number_count)  ### NOT sure if this works!!!
                _layer_number_count += 1
import batoid
from test_helpers import timer
import numpy as np
@timer
def test_normalized():
for _ in range(1000):
x = np.random.uniform()
y = np.random.uniform()
z = np.random.uniform()
w = np.random.uniform()
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y, z])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y, z, w])),
1.0,
rtol=0, atol=1e-10
)
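# For reference, the invariant asserted above (unit Euclidean norm for 1-4
# component inputs) is satisfied by this minimal equivalent — an assumption
# about batoid.utils.normalized, not its actual implementation:
#
#   def normalized(v):
#       v = np.asarray(v, dtype=float)
#       return v / np.linalg.norm(v)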
@timer
def test_gnomicDirCos():
np.random.seed(5)
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
# Test round trip
u1, v1 = batoid.utils.dirCosToGnomic(*batoid.utils.gnomicToDirCos(u, v))
np.testing.assert_allclose(u, u1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(v, v1, rtol=1e-10, atol=1e-12)
# Test round trip in the other direction
alpha = np.random.uniform(-0.1, 0.1, size=1000)
beta = np.random.uniform(-0.1, 0.1, size=1000)
gamma = np.sqrt(1 - alpha**2 - beta**2)
alpha1, beta1, gamma1 = batoid.utils.gnomicToDirCos(
*batoid.utils.dirCosToGnomic(alpha, beta, gamma)
)
np.testing.assert_allclose(alpha, alpha1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(beta, beta1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(gamma, gamma1, rtol=1e-10, atol=1e-12)
# For really tiny angles, u/v should be basically the same as alpha/beta
u = np.random.uniform(-1e-6, 1e-6, size=1000)
v = np.random.uniform(-1e-6, 1e-6, size=1000)
alpha, beta, gamma = batoid.utils.gnomicToDirCos(u, v)
np.testing.assert_allclose(alpha, u, rtol=0, atol=1e-8)
np.testing.assert_allclose(beta, v, rtol=0, atol=1e-8)
# Check normalization of direction cosines
np.testing.assert_allclose(np.sqrt(alpha*alpha+beta*beta+gamma*gamma), 1, rtol=0, atol=1e-15)
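# The identities pinned down by this test (alpha ~ u, beta ~ v for small
# angles, plus unit normalization) are consistent with the usual gnomonic
# convention (alpha, beta, gamma) = (u, v, 1)/sqrt(1 + u^2 + v^2), up to
# batoid's sign convention for gamma — stated here as an inference, not as
# batoid's documented formula.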
@timer
def test_gnomicSpherical():
np.random.seed(57)
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
# Test round trip
u1, v1 = batoid.utils.sphericalToGnomic(*batoid.utils.gnomicToSpherical(u, v))
np.testing.assert_allclose(u, u1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(v, v1, rtol=1e-10, atol=1e-12)
# Test round trip in other direction
phi = np.random.uniform(0.0, 0.1, size=1000)
theta = np.random.uniform(-np.pi, np.pi, size=1000)
phi1, theta1 = batoid.utils.gnomicToSpherical(*batoid.utils.sphericalToGnomic(phi, theta))
np.testing.assert_allclose(phi, phi1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(theta, theta1, rtol=1e-10, atol=1e-12)
# Check u**2 + v**2 = tan(phi)**2
u, v = batoid.utils.sphericalToGnomic(phi, theta)
np.testing.assert_allclose(np.tan(phi)**2, u**2+v**2, rtol=0, atol=1e-17)
# Check v/u = tan(theta)
np.testing.assert_allclose(np.tan(theta), v/u, rtol=1e-15, atol=0)
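# Together, u^2 + v^2 = tan(phi)^2 and v/u = tan(theta) are consistent with
# the usual parameterization u = tan(phi)*cos(theta), v = tan(phi)*sin(theta),
# up to an overall sign.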
@timer
def test_sphericalToDirCos():
np.random.seed(577)
phi = np.random.uniform(0.0, 0.1, size=1000)
theta = np.random.uniform(-np.pi, np.pi, size=1000)
# Test round trip
phi1, theta1 = batoid.utils.dirCosToSpherical(*batoid.utils.sphericalToDirCos(phi, theta))
np.testing.assert_allclose(phi, phi1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(theta, theta1, rtol=1e-10, atol=1e-12)
# Test round trip in other direction
alpha = np.random.uniform(-0.1, 0.1, size=1000)
beta = np.random.uniform(-0.1, 0.1, size=1000)
gamma = np.sqrt(1 - alpha**2 - beta**2)
alpha1, beta1, gamma1 = batoid.utils.sphericalToDirCos(
*batoid.utils.dirCosToSpherical(alpha, beta, gamma)
)
np.testing.assert_allclose(alpha, alpha1, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(beta, beta1, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(gamma, gamma1, rtol=1e-10, atol=1e-12)
# Check normalization of direction cosines
np.testing.assert_allclose(np.sqrt(alpha1*alpha1+beta1*beta1+gamma1*gamma1), 1, rtol=0, atol=1e-15)
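# Combined with the gnomic relations noted above, these round trips are
# consistent with alpha = sin(phi)*cos(theta), beta = sin(phi)*sin(theta),
# gamma = cos(phi) — again an inferred convention, not batoid's documented one.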
@timer
def test_composition():
np.random.seed(5772)
# Let's try spherical -> dirCos -> gnomic = spherical -> gnomic
phi = np.random.uniform(0.0, 0.1, size=1000)
theta = np.random.uniform(-np.pi, np.pi, size=1000)
u1, v1 = batoid.utils.dirCosToGnomic(*batoid.utils.sphericalToDirCos(phi, theta))
u2, v2 = batoid.utils.sphericalToGnomic(phi, theta)
np.testing.assert_allclose(u1, u2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(v1, v2, rtol=1e-10, atol=1e-10)
# And cycle: gnomic -> spherical -> dirCos = gnomic -> dirCos
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
a1, b1, c1 = batoid.utils.sphericalToDirCos(*batoid.utils.gnomicToSpherical(u, v))
a2, b2, c2 = batoid.utils.gnomicToDirCos(u, v)
np.testing.assert_allclose(a1, a2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(b1, b2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(c1, c2, rtol=1e-10, atol=1e-10)
# And cycle: dirCos -> gnomic -> spherical = dirCos -> spherical
a = np.random.uniform(-0.1, 0.1, size=1000)
b = np.random.uniform(-0.1, 0.1, size=1000)
c = np.sqrt(1 - a*a - b*b)
ph1, th1 = batoid.utils.gnomicToSpherical(*batoid.utils.dirCosToGnomic(a, b, c))
ph2, th2 = batoid.utils.dirCosToSpherical(a, b, c)
np.testing.assert_allclose(ph1, ph2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(th1, th2, rtol=1e-10, atol=1e-10)
# And reverse direction: gnomic -> dirCos -> spherical = gnomic -> spherical
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
ph1, th1 = batoid.utils.dirCosToSpherical(*batoid.utils.gnomicToDirCos(u, v))
ph2, th2 = batoid.utils.gnomicToSpherical(u, v)
np.testing.assert_allclose(ph1, ph2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(th1, th2, rtol=1e-10, atol=1e-10)
# and cycle: spherical -> gnomic -> dirCos = spherical -> dirCos
phi = np.random.uniform(0.0, 0.1, size=1000)
theta = np.random.uniform(-np.pi, np.pi, size=1000)
a1, b1, c1 = batoid.utils.gnomicToDirCos(*batoid.utils.sphericalToGnomic(phi, theta))
a2, b2, c2 = batoid.utils.sphericalToDirCos(phi, theta)
np.testing.assert_allclose(a1, a2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(b1, b2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(c1, c2, rtol=1e-10, atol=1e-10)
# and cycle: dirCos -> spherical -> gnomic = dirCos -> gnomic
a = np.random.uniform(-0.1, 0.1, size=1000)
b = np.random.uniform(-0.1, 0.1, size=1000)
c = np.sqrt(1 - a*a - b*b)
u1, v1 = batoid.utils.sphericalToGnomic(*batoid.utils.dirCosToSpherical(a, b, c))
u2, v2 = batoid.utils.dirCosToGnomic(a, b, c)
np.testing.assert_allclose(u1, u2, rtol=1e-10, atol=1e-10)
np.testing.assert_allclose(v1, v2, rtol=1e-10, atol=1e-10)
@timer
def test_jacobian():
np.random.seed(57721)
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
phi, theta = batoid.utils.gnomicToSpherical(u, v)
jac1 = batoid.utils.dSphericalDGnomic(u, v)
jac2 = batoid.utils.dGnomicDSpherical(phi, theta)
# Check that the product of the jacobian and its inverse is the identity matrix
np.testing.assert_allclose(
np.matmul(np.transpose(jac1, (2,0,1)), np.transpose(jac2, (2,0,1))),
np.transpose(np.tile(np.eye(2)[:,:,None], 1000), (2,0,1)),
rtol=0, atol=1e-15
)
np.testing.assert_allclose(
np.matmul(np.transpose(jac2, (2,0,1)), np.transpose(jac1, (2,0,1))),
np.transpose(np.tile(np.eye(2)[:,:,None], 1000), (2,0,1)),
rtol=0, atol=1e-15
)
# Check d(u, v)/d(phi, theta) against finite difference
du = dv = 1e-8
phi2, theta2 = batoid.utils.gnomicToSpherical(u+du, v)
phi3, theta3 = batoid.utils.gnomicToSpherical(u, v+dv)
np.testing.assert_allclose(
jac1[0,0,:],
(phi2-phi)/du,
atol=1e-5
)
np.testing.assert_allclose(
jac1[0,1,:],
        (phi3-phi)/dv,
atol=1e-5
)
np.testing.assert_allclose(
jac1[1,0,:],
np.sin(phi)*(theta2-theta)/du,
atol=1e-5
)
np.testing.assert_allclose(
jac1[1,1,:],
        np.sin(phi)*(theta3-theta)/dv,
        atol=1e-5
    )
import numpy
import xraylib
import scipy.constants as codata
# needed by bragg_calc
from xoppylib.crystals.bragg_preprocessor_file_io import bragg_preprocessor_file_v2_write
from dabax.common_tools import f0_xop, f0_xop_with_fractional_charge
from dabax.common_tools import bragg_metrictensor, lorentz, atomic_symbols
import sys
import os
import platform
from xoppylib.xoppy_util import locations
from dabax.dabax_xraylib import DabaxXraylib
#
#
#
def bragg_metrictensor(a,b,c,a1,a2,a3,RETURN_REAL_SPACE=0,RETURN_VOLUME=0,HKL=None):
"""
Returns the metric tensor in the reciprocal space
:param a: unit cell a
:param b: unit cell b
:param c: unit cell c
:param a1: unit cell alpha
:param a2: unit cell beta
:param a3: unit cell gamma
:param RETURN_REAL_SPACE: set to 1 for returning metric tensor in real space
:param RETURN_VOLUME: set to 1 to return the unit cell volume in Angstroms^3
:param HKL: if !=None, returns the d-spacing for the corresponding [H,K,L] reflection
:return: the returned value depends on the keywords used. If RETURN_REAL_SPACE=0,RETURN_VOLUME=0, and HKL=None
    then returns the metric tensor in reciprocal space.
"""
# input cell a,b,c,alpha,beta,gamma; angles in degrees
a1 *= numpy.pi / 180.0
a2 *= numpy.pi / 180.0
a3 *= numpy.pi / 180.0
# ;
# ; tensor in real space
# ;
g = numpy.array( [ [a*a, a*b*numpy.cos(a3), a*c*numpy.cos(a2)], \
[a*b*numpy.cos(a3), b*b, b*c*numpy.cos(a1)], \
[a*c*numpy.cos(a2), b*c*numpy.cos(a1), c*c]] )
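    # (added note, not in the original source: the matrix above is the
    #  real-space metric g_ij = a_i . a_j -- squared edge lengths on the
    #  diagonal, products of edges times the cosine of the enclosed angle
    #  off it; its inverse below gives the reciprocal-space metric.)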
if RETURN_REAL_SPACE: return g
# print("g: ",g)
# ;
# ; volume of the lattice
# ;
volume2 = numpy.linalg.det(g)
volume = numpy.sqrt(volume2)
# print("Volume of unit cell: %g A^3",volume)
if RETURN_VOLUME: return volume
# ;
# ; tensor in reciprocal space
# ;
ginv = | numpy.linalg.inv(g) | numpy.linalg.inv |
import warnings
import numpy as np
import numpy.testing as npt
from dipy.data import get_fnames
from dipy.core.gradients import (gradient_table, GradientTable,
gradient_table_from_bvals_bvecs,
gradient_table_from_qvals_bvecs,
gradient_table_from_gradient_strength_bvecs,
WATER_GYROMAGNETIC_RATIO,
reorient_bvecs, generate_bvecs,
check_multi_b)
from dipy.io.gradients import read_bvals_bvecs
def test_btable_prepare():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
# bt.info
fimg, fbvals, fbvecs = get_fnames('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
bvecs = np.where(np.isnan(bvecs), 0, bvecs)
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt2 = gradient_table(bvals, bvecs.T)
npt.assert_array_equal(bt2.bvecs, bvecs)
btab = np.concatenate((bvals[:, None], bvecs), axis=1)
bt3 = gradient_table(btab)
npt.assert_array_equal(bt3.bvecs, bvecs)
npt.assert_array_equal(bt3.bvals, bvals)
bt4 = gradient_table(btab.T)
npt.assert_array_equal(bt4.bvecs, bvecs)
npt.assert_array_equal(bt4.bvals, bvals)
# Test for proper inputs (expects either bvals/bvecs or 4 by n):
npt.assert_raises(ValueError, gradient_table, bvecs)
def test_GradientTable():
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
expected_bvals = np.array([0, 1, 1, 5, 13])
expected_b0s_mask = expected_bvals == 0
expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]
gt = GradientTable(gradients, b0_threshold=0)
npt.assert_array_almost_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
npt.assert_array_almost_equal(gt.gradients, gradients)
gt = GradientTable(gradients, b0_threshold=1)
npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
npt.assert_array_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.bvecs, expected_bvecs)
# checks negative values in gtab
npt.assert_raises(ValueError, GradientTable, -1)
npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
npt.assert_raises(ValueError, GradientTable, np.ones((6,)))
with warnings.catch_warnings(record=True) as w:
bad_gt = gradient_table(expected_bvals, expected_bvecs,
b0_threshold=200)
assert len(w) == 1
def test_gradient_table_from_qvals_bvecs():
qvals = 30. * np.ones(7)
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
qvals[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_qvals_bvecs(qvals, bvecs,
big_delta, small_delta)
bvals_expected = (qvals * 2 * np.pi) ** 2 * (big_delta - small_delta / 3.)
gradient_strength_expected = qvals * 2 * np.pi /\
(small_delta * WATER_GYROMAGNETIC_RATIO)
npt.assert_almost_equal(gt.gradient_strength, gradient_strength_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
def test_gradient_table_from_gradient_strength_bvecs():
gradient_strength = .03e-3 * np.ones(7) # clinical strength at 30 mT/m
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
gradient_strength[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_gradient_strength_bvecs(gradient_strength, bvecs,
big_delta, small_delta)
qvals_expected = (gradient_strength * WATER_GYROMAGNETIC_RATIO *
small_delta / (2 * np.pi))
bvals_expected = (qvals_expected * 2 * np.pi) ** 2 *\
(big_delta - small_delta / 3.)
npt.assert_almost_equal(gt.qvals, qvals_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
def test_gradient_table_from_bvals_bvecs():
sq2 = np.sqrt(2) / 2
bvals = [0, 1, 2, 3, 4, 5, 6, 0]
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.gradients, np.reshape(bvals, (-1, 1)) * bvecs)
npt.assert_array_equal(gt.b0s_mask, [1, 0, 0, 0, 0, 0, 0, 1])
# Test nans are replaced by 0
new_bvecs = bvecs.copy()
new_bvecs[[0, -1]] = np.nan
gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
# Bvalue > 0 for non-unit vector
bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
    # number of gradients inconsistent between bvals and bvecs
bad_bvals = np.ones(7)
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# negative bvals
bad_bvals = [-1, -1, -1, -5, -6, -10]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvals not 1d
bad_bvals = np.ones((1, 8))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvec not 2d
bad_bvecs = np.ones((1, 8, 3))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvec not (N, 3)
bad_bvecs = np.ones((8, 2))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvecs not unit vectors
bad_bvecs = bvecs * 2
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# Test **kargs get passed along
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0,
big_delta=5, small_delta=2)
npt.assert_equal(gt.big_delta, 5)
npt.assert_equal(gt.small_delta, 2)
def test_b0s():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(8)
bvals[0] = 0
bvals[7] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(np.where(bt.b0s_mask > 0)[0], | np.array([0, 7]) | numpy.array |
"""Coding Practice #0616."""
import numpy as np
import cv2
# 1. Convolutional filtering.
# Open an image in B/W (Gray scale) and show it.
img = cv2.imread('picture_LenaSoderberg_small.jpg', 0) # Open as a B/W image.
cv2.imshow("In Gray Scale", img)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.1. Applying the Gaussian blur kernel.
img_blurred = cv2.GaussianBlur(img, ksize=(5, 5), sigmaX=10, sigmaY=10)
cv2.imshow("Blurred", img_blurred)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.2. Applying the Sharpening kernel.
kernel_sharp = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
img_sharp = cv2.filter2D(img, ddepth=-1, kernel=kernel_sharp) # ddepth = -1 => the destination has the same depth as the source image.
cv2.imshow("Sharpened", img_sharp)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
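# (Added note, a sketch of why the kernel above sharpens: it is the identity
# kernel plus a negative 4-neighbour Laplacian, so the output is roughly
# img + (img - local_average); edges are boosted while flat regions and the
# overall brightness stay unchanged, since the kernel weights sum to 1.)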
# 1.3. Applying the Outline kernel #1.
kernel_outline1 = | np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) | numpy.array |
"""Crowded field photometry pipeline.
This module fits positions, fluxes, PSFs, and sky backgrounds of images.
Intended usage is:
>>> x, y, flux, model, psf = fit_im(im, psf_initial, weight=wim,
psfderiv=numpy.gradient(-psf),
nskyx=3, nskyy=3, refit_psf=True)
which returns the best fit positions (x, y), fluxes (flux), model image
(model), and improved psf (psf) to the image im, with an initial psf guess
(psf_initial), an inverse-variance image wim, and a variable sky background.
See mosaic.py for how to use this on a large image that is too big to be fit
entirely simultaneously.
"""
import numpy
import pdb
import crowdsource.psf as psfmod
import scipy.ndimage.filters as filters
from collections import OrderedDict
import sys
nodeblend_maskbit = 2**30
sharp_maskbit = 2**31
def shift(im, offset, **kw):
"""Wrapper for scipy.ndimage.interpolation.shift"""
from scipy.ndimage.interpolation import shift
if 'order' not in kw:
kw['order'] = 4
# 1" Gaussian: 60 umag; 0.75": 0.4 mmag; 0.5": 4 mmag
# order=3 roughly 5x worse.
if 'mode' not in kw:
kw['mode'] = 'nearest'
if 'output' not in kw:
kw['output'] = im.dtype
return shift(im, offset, **kw)
def sim_image(nx, ny, nstar, psf, noise, nskyx=3, nskyy=3, stampsz=19):
im = numpy.random.randn(nx, ny).astype('f4')*noise
stampszo2 = stampsz // 2
im = numpy.pad(im, [stampszo2, stampszo2], constant_values=-1e6,
mode='constant')
x = numpy.random.rand(nstar).astype('f4')*(nx-1)
y = numpy.random.rand(nstar).astype('f4')*(ny-1)
flux = 1./numpy.random.power(1.0, nstar)
for i in range(nstar):
stamp = psf(x[i], y[i], stampsz=stampsz)
xl = numpy.round(x[i]).astype('i4')
yl = numpy.round(y[i]).astype('i4')
im[xl:xl+stampsz, yl:yl+stampsz] += stamp*flux[i]
if (nskyx != 0) or (nskyy != 0):
im += sky_model(100*numpy.random.rand(nskyx, nskyy).astype('f4'),
im.shape[0], im.shape[1])
ret = im[stampszo2:-stampszo2, stampszo2:-stampszo2], x, y, flux
return ret
def significance_image(im, model, isig, psf, sz=19):
"""Significance of a PSF at each point, without local background fit."""
# assume, for the moment, the image has already been sky-subtracted
def convolve(im, kernel):
from scipy.signal import fftconvolve
return fftconvolve(im, kernel[::-1, ::-1], mode='same')
# identical to 1e-8 or so
# from scipy.ndimage.filters import convolve
# return convolve(im, kernel[::-1, ::-1], mode='nearest')
psfstamp = psfmod.central_stamp(psf, sz).copy()
sigim = convolve(im*isig**2., psfstamp)
varim = convolve(isig**2., psfstamp**2.)
modim = convolve(model*isig**2., psfstamp)
varim[varim <= 1e-14] = 0. # numerical noise starts to set in around here.
ivarim = 1./(varim + (varim == 0) * 1e14)
return sigim*numpy.sqrt(ivarim), modim*numpy.sqrt(ivarim)
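# A minimal usage sketch (added; the Gaussian stamp and injected star below are
# made up for illustration, and the `_demo_` helper is not part of the original
# module): the matched-filter significance should peak at the star's position.
def _demo_significance_image():
    rng = numpy.random.RandomState(0)
    yy, xx = numpy.mgrid[-9:10, -9:10]
    psfstamp = numpy.exp(-(xx**2 + yy**2)/(2*2.0**2)).astype('f4')
    psfstamp /= psfstamp.sum()
    im = rng.randn(101, 101).astype('f4')
    im[41:60, 41:60] += 100*psfstamp  # star centered at pixel (50, 50)
    isig = numpy.ones_like(im)
    sigim, _ = significance_image(im, numpy.zeros_like(im), isig, psfstamp)
    xmax, ymax = numpy.unravel_index(numpy.argmax(sigim), sigim.shape)
    assert abs(xmax - 50) <= 1 and abs(ymax - 50) <= 1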
def significance_image_lbs(im, model, isig, psf, sz=19):
"""Give significance of PSF at each point, with local background fits."""
def convolve(im, kernel):
from scipy.signal import fftconvolve
return fftconvolve(im, kernel[::-1, ::-1], mode='same')
def convolve_flat(im, sz):
from scipy.ndimage.filters import convolve
filt = numpy.ones(sz, dtype='f4')
c1 = convolve(im, filt.reshape(1, -1), mode='constant', origin=0)
return convolve(c1, filt.reshape(-1, 1), mode='constant', origin=0)
# we need: * convolution of ivar with P^2
# * convolution of ivar with flat
# * convolution of ivar with P
# * convolution of b*ivar with P
# * convolution of b*ivar with flat
ivar = isig**2.
if sz is None:
psfstamp = psfmod.central_stamp(psf).copy()
else:
psfstamp = psfmod.central_stamp(psf, censize=sz).copy()
ivarp2 = convolve(ivar, psfstamp**2.)
ivarp2[ivarp2 < 0] = 0.
ivarimsimple = 1./(ivarp2 + (ivarp2 == 0) * 1e12)
ivarf = convolve_flat(ivar, psfstamp.shape[0])
ivarp = convolve(ivar, psfstamp)
bivarp = convolve(im*ivar, psfstamp)
bivarf = convolve_flat(im*ivar, psfstamp.shape[0])
atcinvadet = ivarp2*ivarf-ivarp**2.
atcinvadet[atcinvadet <= 0] = 1.e-12
ivarf[ivarf <= 0] = 1.e-12
fluxest = (bivarp*ivarf-ivarp*bivarf)/atcinvadet
fluxisig = numpy.sqrt(atcinvadet/ivarf)
fluxsig = fluxest*fluxisig
modim = convolve(model*ivar, psfstamp)
return fluxsig, modim*numpy.sqrt(ivarimsimple)
def peakfind(im, model, isig, dq, psf, keepsat=False, threshold=5,
blendthreshold=0.3, psfvalsharpcutfac=0.7, psfsharpsat=0.7):
psfstamp = psf(int(im.shape[0]/2.), int(im.shape[1]/2.), deriv=False,
stampsz=59)
sigim, modelsigim = significance_image(im, model, isig, psfstamp,
sz=59)
sig_max = filters.maximum_filter(sigim, 3)
x, y = numpy.nonzero((sig_max == sigim) & (sigim > threshold) &
(keepsat | (isig > 0)))
fluxratio = im[x, y]/numpy.clip(model[x, y], 0.01, numpy.inf)
sigratio = (im[x, y]*isig[x, y])/numpy.clip(modelsigim[x, y], 0.01,
numpy.inf)
sigratio2 = sigim[x, y]/numpy.clip(modelsigim[x, y], 0.01, numpy.inf)
keepsatcensrc = keepsat & (isig[x, y] == 0)
m = ((isig[x, y] > 0) | keepsatcensrc) # ~saturated, or saturated & keep
if dq is not None and numpy.any(dq[x, y] & nodeblend_maskbit):
nodeblend = (dq[x, y] & nodeblend_maskbit) != 0
blendthreshold = numpy.ones_like(x)*blendthreshold
blendthreshold[nodeblend] = 100
if dq is not None and numpy.any(dq[x, y] & sharp_maskbit):
sharp = (dq[x, y] & sharp_maskbit) != 0
msharp = ~sharp | psfvalsharpcut(
x, y, sigim, isig, psfstamp, psfvalsharpcutfac=psfvalsharpcutfac,
psfsharpsat=psfsharpsat)
# keep if not nebulous region or sharp peak.
m = m & msharp
m = m & ((sigratio2 > blendthreshold*2) |
((fluxratio > blendthreshold) & (sigratio > blendthreshold/4.) &
(sigratio2 > blendthreshold)))
return x[m], y[m]
def psfvalsharpcut(x, y, sigim, isig, psf, psfvalsharpcutfac=0.7,
psfsharpsat=0.7):
xl = numpy.clip(x-1, 0, sigim.shape[0]-1)
xr = numpy.clip(x+1, 0, sigim.shape[0]-1)
yl = numpy.clip(y-1, 0, sigim.shape[1]-1)
yr = numpy.clip(y+1, 0, sigim.shape[1]-1)
# sigim[x, y] should always be >0 from threshold cut.
psfval1 = 1-(sigim[xl, y]+sigim[xr, y])/(2*sigim[x, y])
psfval2 = 1-(sigim[x, yl]+sigim[x, yr])/(2*sigim[x, y])
psfval3 = 1-(sigim[xl, yl]+sigim[xr, yr])/(2*sigim[x, y])
psfval4 = 1-(sigim[xl, yr]+sigim[xr, yl])/(2*sigim[x, y])
# in nebulous region, there should be a peak of these around the PSF
# size, plus a bunch of diffuse things (psfval ~ 0).
from scipy.signal import fftconvolve
pp = fftconvolve(psf, psf[::-1, ::-1], mode='same')
half = psf.shape[0] // 2
ppcen = pp[half, half]
psfval1pp = 1-(pp[half-1, half]+pp[half+1, half])/(2*ppcen)
psfval2pp = 1-(pp[half, half-1]+pp[half, half+1])/(2*ppcen)
psfval3pp = 1-(pp[half-1, half-1]+pp[half+1, half+1])/(2*ppcen)
psfval4pp = 1-(pp[half-1, half+1]+pp[half+1, half-1])/(2*ppcen)
fac = psfvalsharpcutfac*(1-psfsharpsat*(isig[x, y] == 0))
# more forgiving if center is masked.
res = ((psfval1 > psfval1pp*fac) & (psfval2 > psfval2pp*fac) &
(psfval3 > psfval3pp*fac) & (psfval4 > psfval4pp*fac))
return res
def build_model(x, y, flux, nx, ny, psf=None, psflist=None, psfderiv=False):
if psf is None and psflist is None:
raise ValueError('One of psf and psflist must be set')
if psf is not None and psflist is not None:
raise ValueError('Only one of psf and psflist must be set')
if psflist is None:
stampsz = 59
psflist = build_psf_list(x, y, psf, stampsz, psfderiv=psfderiv)
sz = numpy.ones(len(x), dtype='i4')*stampsz
else:
sz = numpy.array([tpsf[0].shape[-1] for tpsf in psflist[0]])
if len(sz) > 0:
stampsz = numpy.max(sz)
else:
stampsz = 59
stampszo2 = stampsz//2
im = numpy.zeros((nx, ny), dtype='f4')
im = numpy.pad(im, [stampszo2, stampszo2], constant_values=0.,
mode='constant')
xp = numpy.round(x).astype('i4')
yp = numpy.round(y).astype('i4')
# _subtract_ stampszo2 to move from the center of the PSF to the edge
# of the stamp.
# _add_ it back to move from the original image to the padded image.
xe = xp - sz//2 + stampszo2
ye = yp - sz//2 + stampszo2
repeat = 3 if psfderiv else 1
for i in range(len(x)):
for j in range(repeat):
im[xe[i]:xe[i]+sz[i], ye[i]:ye[i]+sz[i]] += (
psflist[j][i][:, :]*flux[i*repeat+j])
im = im[stampszo2:-stampszo2, stampszo2:-stampszo2]
return im
def build_psf_list(x, y, psf, sz, psfderiv=True):
"""Make a list of PSFs of the right size, hopefully efficiently."""
psflist = {}
for tsz in numpy.unique(sz):
m = sz == tsz
res = psf(x[m], y[m], stampsz=tsz, deriv=psfderiv)
if not psfderiv:
res = [res]
psflist[tsz] = res
counts = {tsz: 0 for tsz in numpy.unique(sz)}
out = [[] for i in range(3 if psfderiv else 1)]
for i in range(len(x)):
for j in range(len(out)):
out[j].append(psflist[sz[i]][j][counts[sz[i]]])
counts[sz[i]] += 1
return out
def in_padded_region(flatcoord, imshape, pad):
coord = numpy.unravel_index(flatcoord, imshape)
m = numpy.zeros(len(flatcoord), dtype='bool')
for c, length in zip(coord, imshape):
m |= (c < pad) | (c >= length - pad)
return m
def fit_once(im, x, y, psfs, weight=None,
psfderiv=False, nskyx=0, nskyy=0,
guess=None):
"""Fit fluxes for psfs at x & y in image im.
Args:
im (ndarray[NX, NY] float): image to fit
x (ndarray[NS] float): x coord
y (ndarray[NS] float): y coord
psf (ndarray[sz, sz] float): psf stamp
weight (ndarray[NX, NY] float): weight for image
psfderiv (tuple(ndarray[sz, sz] float)): x, y derivatives of psf image
nskyx (int): number of sky pixels in x direction (0 or >= 3)
nskyy (int): number of sky pixels in y direction (0 or >= 3)
Returns:
tuple(flux, model, sky)
flux: output of optimization routine; needs to be refined
model (ndarray[NX, NY]): best fit model image
sky (ndarray(NX, NY]): best fit model sky
"""
# sparse matrix, with rows at first equal to the fluxes at each peak
# later add in the derivatives at each peak
sz = numpy.array([tpsf[0].shape[-1] for tpsf in psfs[0]])
if len(sz) > 0:
stampsz = numpy.max(sz)
else:
stampsz = 19
stampszo2 = stampsz // 2
szo2 = sz // 2
nx, ny = im.shape
pad = stampszo2 + 1
im = numpy.pad(im, [pad, pad], constant_values=0.,
mode='constant')
if weight is None:
weight = numpy.ones_like(im)
weight = numpy.pad(weight, [pad, pad], constant_values=0.,
mode='constant')
weight[weight == 0.] = 1.e-20
pix = numpy.arange(stampsz*stampsz, dtype='i4').reshape(stampsz, stampsz)
# convention: x is the first index, y is the second
# sorry.
xpix = pix // stampsz
ypix = pix % stampsz
xp = numpy.round(x).astype('i4')
yp = numpy.round(y).astype('i4')
# _subtract_ stampszo2 to move from the center of the PSF to the edge
# of the stamp.
# _add_ pad back to move from the original image to the padded image.
xe = xp - stampszo2 + pad
ye = yp - stampszo2 + pad
repeat = 1 if not psfderiv else 3
nskypar = nskyx * nskyy
npixim = im.shape[0]*im.shape[1]
xloc = numpy.zeros(repeat*numpy.sum(sz*sz).astype('i4') +
nskypar*npixim, dtype='i4')
# yloc = numpy.zeros(len(xloc), dtype='i4')
# no longer need yloc; csc entries are built directly.
values = numpy.zeros(len(xloc), dtype='f4')
colnorm = numpy.zeros(len(x)*repeat+nskypar, dtype='f4')
first = 0
for i in range(len(xe)):
f = stampszo2-szo2[i]
l = stampsz - f
wt = weight[xe[i]:xe[i]+stampsz, ye[i]:ye[i]+stampsz][f:l, f:l]
for j in range(repeat):
xloc[first:first+sz[i]**2] = (
numpy.ravel_multi_index(((xe[i]+xpix[f:l, f:l]),
(ye[i]+ypix[f:l, f:l])),
im.shape)).reshape(-1)
# yloc[first:first+sz[i]**2] = i*repeat+j
values[first:first+sz[i]**2] = (
(psfs[j][i][:, :]*wt).reshape(-1))
colnorm[i*repeat+j] = numpy.sqrt(
numpy.sum(values[first:first+sz[i]**2]**2.))
colnorm[i*repeat+j] += (colnorm[i*repeat+j] == 0)
values[first:first+sz[i]**2] /= colnorm[i*repeat+j]
first += sz[i]**2
if nskypar != 0:
sxloc, syloc, svalues = sky_parameters(nx+pad*2, ny+pad*2,
nskyx, nskyy, weight)
startidx = len(x)*repeat
nskypix = len(sxloc[0])
for i in range(len(sxloc)):
xloc[first:first+nskypix] = sxloc[i]
# yloc[first:first+nskypix] = startidx+syloc[i]
colnorm[startidx+i] = numpy.sqrt(numpy.sum(svalues[i]**2.))
colnorm[startidx+i] += (colnorm[startidx+i] == 0.)
values[first:first+nskypix] = svalues[i] / colnorm[startidx+i]
first += nskypix
shape = (im.shape[0]*im.shape[1], len(x)*repeat+nskypar)
from scipy import sparse
csc_indptr = numpy.cumsum([sz[i]**2 for i in range(len(x))
for j in range(repeat)])
csc_indptr = numpy.concatenate([[0], csc_indptr])
if nskypar != 0:
csc_indptr = numpy.concatenate([csc_indptr, [
csc_indptr[-1] + i*nskypix for i in range(1, nskypar+1)]])
mat = sparse.csc_matrix((values, xloc, csc_indptr), shape=shape,
dtype='f4')
if guess is not None:
# guess is a guess for the fluxes and sky; no derivatives.
guessvec = numpy.zeros(len(xe)*repeat+nskypar, dtype='f4')
guessvec[0:len(xe)*repeat:repeat] = guess[0:len(xe)]
if nskypar > 0:
guessvec[-nskypar:] = guess[-nskypar:]
guessvec *= colnorm
else:
guessvec = None
flux = lsqr_cp(mat, (im*weight).ravel(), atol=1.e-4, btol=1.e-4,
guess=guessvec)
model = mat.dot(flux[0]).reshape(*im.shape)
flux[0][:] = flux[0][:] / colnorm
im = im[pad:-pad, pad:-pad]
model = model[pad:-pad, pad:-pad]
weight = weight[pad:-pad, pad:-pad]
if nskypar != 0:
sky = sky_model(flux[0][-nskypar:].reshape(nskyx, nskyy),
nx+pad*2, ny+pad*2)
sky = sky[pad:-pad, pad:-pad]
else:
sky = model * 0
model = model / (weight + (weight == 0))
res = (flux, model, sky)
return res
def unpack_fitpar(guess, nsource, psfderiv):
"""Extract fluxes and sky parameters from fit parameter vector."""
repeat = 3 if psfderiv else 1
return guess[0:nsource*repeat:repeat], guess[nsource*repeat:]
def lsqr_cp(aa, bb, guess=None, **kw):
# implement two speed-ups:
# 1. "column preconditioning": make sure each column of aa has the same
# norm
# 2. allow guesses
# column preconditioning is important (substantial speedup), and has
# been implemented directly in fit_once.
# allow guesses: solving Ax = b is the same as solving A(x-x*) = b-Ax*.
# => A(dx) = b-Ax*. So we can solve for dx instead, then return dx+x*.
# This improves speed if we reduce the tolerance.
from scipy.sparse import linalg
if guess is not None:
bb2 = bb - aa.dot(guess)
if 'btol' in kw:
fac = numpy.sum(bb**2.)**(0.5)/numpy.sum(bb2**2.)**0.5
kw['btol'] = kw['btol']*numpy.clip(fac, 0.1, 10.)
else:
bb2 = bb.copy()
normbb = numpy.sum(bb2**2.)
bb2 /= normbb**(0.5)
par = linalg.lsqr(aa, bb2, **kw)
# for some reason, everything ends up as double precision after this
# or lsmr; lsqr seems to be better
# par[0][:] *= norm**(-0.5)*normbb**(0.5)
par[0][:] *= normbb**0.5
if guess is not None:
par[0][:] += guess
par = list(par)
par[0] = par[0].astype('f4')
par[9] = par[9].astype('f4')
return par
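# A minimal sketch of the guess mechanism (added; toy 3x3 diagonal system, not
# from the original test suite): with a nearby starting guess, lsqr only has to
# solve for the small correction dx, and the result returned is guess + dx.
def _demo_lsqr_cp_guess():
    from scipy import sparse
    aa = sparse.eye(3, format='csc')*2.0
    bb = numpy.array([2., 4., 6.], dtype='f4')
    guess = numpy.array([0.9, 2.1, 2.8], dtype='f4')
    par = lsqr_cp(aa, bb, guess=guess)
    assert numpy.allclose(par[0], [1., 2., 3.], atol=1e-3)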
def compute_centroids(x, y, psflist, flux, im, resid, weight,
derivcentroids=False, centroidsize=19):
# define c = integral(x * I * P * W) / integral(I * P * W)
# x = x/y coordinate, I = isolated stamp, P = PSF model, W = weight
# Assuming I ~ P(x-y) for some small offset y and expanding,
# integrating by parts gives:
    # y = 2 / integral(P*P*W) * integral(x*(I-P)*P*W)
# that is the offset we want.
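    # (added step, sketching the algebra: write I ~ P(x - y) ~ P - y*P', drop
    #  the odd integral of x*P*P*W for a symmetric PSF, and integrate the
    #  remaining x*P'*P*W term by parts; that leaves -integral(P*P*W)/2, which
    #  is where the factor of 2 above comes from.)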
# we want to compute the centroids on the image after the other sources
# have been subtracted off.
# we construct this image by taking the residual image, and then
# star-by-star adding the model back.
psfs = [numpy.zeros((len(x), centroidsize, centroidsize), dtype='f4')
for i in range(len(psflist))]
for j in range(len(psflist)):
for i in range(len(x)):
psfs[j][i, :, :] = psfmod.central_stamp(psflist[j][i],
censize=centroidsize)
stampsz = psfs[0].shape[-1]
stampszo2 = (stampsz-1)//2
dx = numpy.arange(stampsz, dtype='i4')-stampszo2
dx = dx.reshape(-1, 1)
dy = dx.copy().reshape(1, -1)
xp = numpy.round(x).astype('i4')
yp = numpy.round(y).astype('i4')
# subtracting to get to the edge of the stamp, adding back to deal with
# the padded image.
xe = xp - stampszo2 + stampszo2
ye = yp - stampszo2 + stampszo2
resid = numpy.pad(resid, [stampszo2, stampszo2], constant_values=0.,
mode='constant')
weight = numpy.pad(weight, [stampszo2, stampszo2], constant_values=0.,
mode='constant')
im = numpy.pad(im, [stampszo2, stampszo2], constant_values=0.,
mode='constant')
repeat = len(psflist)
residst = numpy.array([resid[xe0:xe0+stampsz, ye0:ye0+stampsz]
for (xe0, ye0) in zip(xe, ye)])
weightst = numpy.array([weight[xe0:xe0+stampsz, ye0:ye0+stampsz]
for (xe0, ye0) in zip(xe, ye)])
psfst = psfs[0] * flux[:len(x)*repeat:repeat].reshape(-1, 1, 1)
imst = numpy.array([im[xe0:xe0+stampsz, ye0:ye0+stampsz]
for (xe0, ye0) in zip(xe, ye)])
if len(x) == 0:
weightst = psfs[0].copy()
residst = psfs[0].copy()
imst = psfs[0].copy()
modelst = psfst.copy()
if len(psflist) > 1:
modelst += psfs[1]*flux[1:len(x)*repeat:repeat].reshape(-1, 1, 1)
modelst += psfs[2]*flux[2:len(x)*repeat:repeat].reshape(-1, 1, 1)
cen = []
ppw = numpy.sum(modelst*modelst*weightst, axis=(1, 2))
pp = numpy.sum(modelst*modelst, axis=(1, 2))
for dc in (dx, dy):
xrpw = numpy.sum(dc[None, :, :]*residst*modelst*weightst, axis=(1, 2))
xmmpm = numpy.sum(dc[None, :, :]*(modelst-psfst)*modelst, axis=(1, 2))
cen.append(2*xrpw/(ppw + (ppw == 0.))*(ppw != 0.) +
2*xmmpm/(pp + (pp == 0.))*(pp != 0.))
xcen, ycen = cen
norm = numpy.sum(modelst, axis=(1, 2))
norm = norm + (norm == 0)
psfqf = numpy.sum(modelst*(weightst > 0), axis=(1, 2)) / norm
# how should we really be doing this? derivcentroids is the first order
# approximation to the right thing. the centroid computation that I do
# otherwise should be unbiased but noisier than optimal for significantly
# offset peaks. <NAME> (2016) say that I should convolve with the
# PSF and interpolate to the brightest point with some polynomial. I
# expected this to be slow (convolving thousands of stamps individually
# with the PSF each iteration), but the spread_model code worked pretty
# well, so this is probably a worthwhile thing to try. if it worked, it
# would obviate some of the code mess above, and be optimal, so that
# sounds worthwhile.
if not derivcentroids:
m = psfqf < 0.5
else:
m = numpy.ones(len(xcen), dtype='bool')
xcen[m] = 0.
ycen[m] = 0.
if (len(psflist) > 1) and numpy.sum(m) > 0:
ind = numpy.flatnonzero(m)
# just use the derivative-based centroids for this case.
fluxnz = flux[repeat*ind]
fluxnz = fluxnz + (fluxnz == 0)
xcen[ind] = flux[repeat*ind+1]/fluxnz
ycen[ind] = flux[repeat*ind+2]/fluxnz
# stamps: 0: neighbor-subtracted images,
# 1: images,
# 2: psfs with shifts
# 3: psfs without shifts
res = (xcen, ycen, (modelst+residst, imst, modelst, weightst, psfst))
return res
def estimate_sky_background(im):
"""Find peak of count distribution; pretend this is the sky background."""
# for some reason, I have found this hard to work robustly. Replace with
# median at the moment.
return numpy.median(im)
def sky_im(im, weight=None, npix=20, order=1):
"""Remove sky from image."""
nbinx, nbiny = (numpy.ceil(sh/1./npix).astype('i4') for sh in im.shape)
xg = numpy.linspace(0, im.shape[0], nbinx+1).astype('i4')
yg = numpy.linspace(0, im.shape[1], nbiny+1).astype('i4')
val = numpy.zeros((nbinx, nbiny), dtype='f4')
usedpix = numpy.zeros((nbinx, nbiny), dtype='f4')
if weight is None:
weight = numpy.ones_like(im, dtype='f4')
if numpy.all(weight == 0):
return im*0
# annoying!
for i in range(nbinx):
for j in range(nbiny):
use = weight[xg[i]:xg[i+1], yg[j]:yg[j+1]] > 0
usedpix[i, j] = numpy.sum(use)
if usedpix[i, j] > 0:
val[i, j] = estimate_sky_background(
im[xg[i]:xg[i+1], yg[j]:yg[j+1]][use])
val[usedpix < 20] = 0.
usedpix[usedpix < 20] = 0.
from scipy.ndimage.filters import gaussian_filter
count = 0
while numpy.any(usedpix == 0):
sig = 0.4
valc = gaussian_filter(val*(usedpix > 0), sig, mode='constant')
weightc = gaussian_filter((usedpix != 0).astype('f4'), sig,
mode='constant')
m = (usedpix == 0) & (weightc > 1.e-10)
val[m] = valc[m]/weightc[m]
usedpix[m] = 1
count += 1
if count > 100:
m = usedpix == 0
val[m] = numpy.median(im)
print('Sky estimation failed badly.')
break
x = numpy.arange(im.shape[0])
y = numpy.arange(im.shape[1])
xc = (xg[:-1]+xg[1:])/2.
yc = (yg[:-1]+yg[1:])/2.
from scipy.ndimage import map_coordinates
xp = numpy.interp(x, xc, numpy.arange(len(xc), dtype='f4'))
yp = numpy.interp(y, yc, numpy.arange(len(yc), dtype='f4'))
xpa = xp.reshape(-1, 1)*numpy.ones(len(yp)).reshape(1, -1)
ypa = yp.reshape(1, -1)*numpy.ones(len(xp)).reshape(-1, 1)
coord = [xpa.ravel(), ypa.ravel()]
bg = map_coordinates(val, coord, mode='nearest', order=order)
bg = bg.reshape(im.shape)
return bg
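# A minimal sketch (added; flat synthetic background with made-up values, and
# the `_demo_` helper is not part of the original module): the recovered sky
# should track the constant offset of a noise-only image.
def _demo_sky_im():
    rng = numpy.random.RandomState(1)
    im = rng.randn(100, 100).astype('f4') + 50.
    bg = sky_im(im, npix=20)
    assert numpy.abs(numpy.median(bg) - 50.) < 1.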
def get_sizes(x, y, imbs, weight=None, blist=None):
x = numpy.round(x).astype('i4')
y = numpy.round(y).astype('i4')
peakbright = imbs[x, y]
sz = numpy.zeros(len(x), dtype='i4')
cutoff = 1000
sz[peakbright > cutoff] = 59
sz[peakbright <= cutoff] = 19 # for the moment...
# for very bright things, use a bigger PSF
# but if there are too many of these, don't bother.
cutoff2 = 20000
if ((numpy.sum(peakbright > cutoff2) < numpy.sum(peakbright > cutoff)/2)
or (numpy.sum(peakbright > cutoff) < 100)):
sz[peakbright > cutoff2] = 149
else:
print('Too many bright sources, using smaller PSF stamp size...')
if weight is not None:
sz[weight[x, y] == 0] = 149 # saturated/off edge sources get big PSF
# sources near listed sources get very big PSF
if blist is not None and len(x) > 0:
for xb, yb in zip(blist[0], blist[1]):
dist2 = (x-xb)**2 + (y-yb)**2
indclose = numpy.argmin(dist2)
if dist2[indclose] < 5**2:
sz[indclose] = 299
return sz
def fit_im_force(im, x, y, psf, weight=None, dq=None, psfderiv=True,
nskyx=0, nskyy=0, refit_psf=False,
niter=4, blist=None, derivcentroids=False, refit_sky=True,
startsky=numpy.nan):
repeat = 3 if psfderiv else 1
guessflux = None
msky = 0
model = 0
if len(x) == 0:
raise ValueError('must force some sources')
if derivcentroids and not psfderiv:
raise ValueError('derivcentroids only makes sense when psfderiv '
'is true')
for titer in range(niter):
for c, s in zip((x, y), im.shape):
if numpy.any((c < -0.499) | (c > s-0.501)):
c[:] = numpy.clip(c, -0.499, s-0.501)
print('Some positions within 0.01 pix of edge of image '
'clipped back to 0.01 pix inside image.')
if (refit_sky and
((titer > 0) or numpy.any(~numpy.isfinite(startsky)))):
sky = sky_im(im-model, weight=weight, npix=100)
else:
sky = startsky
sz = get_sizes(x, y, im-sky-msky, weight=weight, blist=blist)
minsz = numpy.min(sz)
psfs = [numpy.zeros((len(x), minsz, minsz), dtype='f4')
for i in range(repeat)]
if guessflux is not None:
guess = guessflux.copy()
else:
guess = None
# should really only be done once in refit_psf=False case
psfsfull = build_psf_list(x, y, psf, sz, psfderiv=psfderiv)
# need to package some "tiling" around this eventually, probably?
flux, model, msky = fit_once(
im-sky, x, y, psfsfull,
psfderiv=psfderiv, weight=weight, guess=guess,
nskyx=nskyx, nskyy=nskyy)
import gc
gc.collect()
flux = flux[0]
skypar = flux[len(x)*repeat:]
guessflux = flux[:len(x)*repeat:repeat]
for i in range(repeat):
psfs[i][...] = [psfmod.central_stamp(psfsfull[i][j], minsz)
for j in range(len(psfsfull[i]))]
centroids = compute_centroids(x, y, psfs, flux, im-(sky+msky),
im-model-sky,
weight, derivcentroids=derivcentroids)
xcen, ycen, stamps = centroids
if refit_psf:
psf, x, y = refit_psf_from_stamps(psf, x, y, xcen, ycen,
stamps)
# we are letting the positions get updated, even when
# psfderiv is false, only for the mean shift that
# gets introduced when we recentroid all the stars.
# we could eliminate this by replacing the above with
# psf, _, _ = refit_psf_from_stamps(...)
# for WISE at the moment, this should _mostly_ introduce
# a mean shift, and potentially also a small subpixel-offset
# related shift.
if psfderiv:
if derivcentroids:
maxstep = 1
else:
maxstep = 3
dcen = numpy.sqrt(xcen**2 + ycen**2)
m = dcen > maxstep
xcen[m] /= dcen[m]
ycen[m] /= dcen[m]
x, y = (numpy.clip(c, -0.499, s-0.501)
for c, s in zip((x+xcen, y+ycen), im.shape))
print('Iteration %d, median sky %6.2f' %
(titer+1, numpy.median(sky+msky)))
stats = compute_stats(x-numpy.round(x), y-numpy.round(y),
stamps[0], stamps[2], stamps[3], stamps[1], flux)
if dq is not None:
stats['flags'] = extract_im(x, y, dq).astype('i4')
stats['sky'] = extract_im(x, y, sky+msky).astype('f4')
stars = OrderedDict([('x', x), ('y', y), ('flux', flux),
('deltx', xcen), ('delty', ycen)] +
[(f, stats[f]) for f in stats])
dtypenames = list(stars.keys())
dtypeformats = [stars[n].dtype for n in dtypenames]
dtype = dict(names=dtypenames, formats=dtypeformats)
stars = numpy.fromiter(zip(*stars.values()),
dtype=dtype, count=len(stars['x']))
res = (stars, model+sky, sky+msky, psf)
return res
def refit_psf_from_stamps(psf, x, y, xcen, ycen, stamps, name=None,
plot=False):
# how far the centroids of the model PSFs would
# be from (0, 0) if instantiated there
# this initial definition includes the known offset (since
# we instantiated off a pixel center), and the model offset
xe, ye = psfmod.simple_centroid(
psfmod.central_stamp(stamps[4], censize=stamps[0].shape[-1]))
# now we subtract the known offset
xe -= x-numpy.round(x)
ye -= y-numpy.round(y)
if hasattr(psf, 'fitfun'):
psffitfun = psf.fitfun
npsf = psffitfun(x, y, xcen+xe, ycen+ye, stamps[0],
stamps[1], stamps[2], stamps[3], nkeep=200,
name=name, plot=plot)
if npsf is not None:
npsf.fitfun = psffitfun
else:
shiftx = xcen + xe + x - numpy.round(x)
shifty = ycen + ye + y - numpy.round(y)
npsf = find_psf(x, shiftx, y, shifty,
stamps[0], stamps[3], stamps[1])
# we removed the centroid offset of the model PSFs;
# we need to correct the positions to compensate
if npsf is not None:
xnew = x + xe
ynew = y + ye
psf = npsf
else:
xnew = x
ynew = y
return psf, xnew, ynew
def fit_im(im, psf, weight=None, dq=None, psfderiv=True,
nskyx=0, nskyy=0, refit_psf=False,
verbose=False, miniter=4, maxiter=10, blist=None,
maxstars=40000, derivcentroids=False,
ntilex=1, ntiley=1, fewstars=100, threshold=5,
ccd=None, plot=False, titer_thresh=2, blendthreshu=2,
psfvalsharpcutfac=0.7, psfsharpsat=0.7):
if isinstance(weight, int):
weight = numpy.ones_like(im)*weight
model = numpy.zeros_like(im)
xa = numpy.zeros(0, dtype='f4')
ya = xa.copy()
lsky = numpy.median(im[weight > 0])
hsky = numpy.median(im[weight > 0])
msky = numpy.zeros_like(im)
passno = numpy.zeros(0, dtype='i4')
guessflux, guesssky = None, None
titer = -1
lastiter = -1
skypar = {} # best sky parameters so far.
roughfwhm = psfmod.neff_fwhm(psf(im.shape[0]//2, im.shape[1]//2))
roughfwhm = numpy.max([roughfwhm, 3.])
while True:
titer += 1
hsky = sky_im(im-model, weight=weight, npix=20)
lsky = sky_im(im-model, weight=weight, npix=50*roughfwhm)
if titer != lastiter:
# in first passes, do not split sources!
blendthresh = blendthreshu if titer < titer_thresh else 0.2
xn, yn = peakfind(im-model-hsky,
model-msky, weight, dq, psf,
keepsat=(titer == 0),
blendthreshold=blendthresh,
threshold=threshold,
psfvalsharpcutfac=psfvalsharpcutfac,
psfsharpsat=psfsharpsat)
if len(xa) > 0 and len(xn) > 0:
keep = neighbor_dist(xn, yn, xa, ya) > 1.5
xn, yn = (c[keep] for c in (xn, yn))
if (titer == 0) and (blist is not None):
xnb, ynb = add_bright_stars(xn, yn, blist, im)
xn = numpy.concatenate([xn, xnb]).astype('f4')
yn = numpy.concatenate([yn, ynb]).astype('f4')
xa, ya = (numpy.concatenate([xa, xn]).astype('f4'),
numpy.concatenate([ya, yn]).astype('f4'))
passno = numpy.concatenate([passno, numpy.zeros(len(xn))+titer])
else:
xn, yn = numpy.zeros(0, dtype='f4'), numpy.zeros(0, dtype='f4')
if titer != lastiter:
if (titer == maxiter-1) or (
(titer >= miniter-1) and (len(xn) < fewstars)) or (
len(xa) > maxstars):
lastiter = titer + 1
# we probably don't want the sizes to change very much. hsky certainly
# will change a bit from iteration to iteration, though.
sz = get_sizes(xa, ya, im-hsky-msky, weight=weight, blist=blist)
if guessflux is not None:
guess = numpy.concatenate([guessflux, numpy.zeros_like(xn)])
else:
guess = None
sky = hsky if titer >= 2 else lsky
# in final iteration, no longer allow shifting locations; just fit
# centroids.
tpsfderiv = psfderiv if lastiter != titer else False
repeat = 1+tpsfderiv*2
if len(sz) != 0:
minsz = numpy.min(sz)
else:
minsz = 19
psfs = [numpy.zeros((len(xa), minsz, minsz), dtype='f4')
for i in range(repeat)]
flux = numpy.zeros(len(xa)*repeat, dtype='f4')
for (bdxf, bdxl, bdxaf, bdxal, bdyf, bdyl, bdyaf, bdyal) in (
subregions(im.shape, ntilex, ntiley)):
mbda = in_bounds(xa, ya, [bdxaf-0.5, bdxal-0.5],
[bdyaf-0.5, bdyal-0.5])
mbd = in_bounds(xa, ya, [bdxf-0.5, bdxl-0.5],
[bdyf-0.5, bdyl-0.5])
psfsbda = build_psf_list(xa[mbda], ya[mbda], psf, sz[mbda],
psfderiv=tpsfderiv)
sall = numpy.s_[bdxaf:bdxal, bdyaf:bdyal]
spri = numpy.s_[bdxf:bdxl, bdyf:bdyl]
dx, dy = bdxal-bdxaf, bdyal-bdyaf
sfit = numpy.s_[bdxf-bdxaf:dx+bdxl-bdxal,
bdyf-bdyaf:dy+bdyl-bdyal]
weightbda = weight[sall] if weight is not None else None
guessmbda = guess[mbda] if guess is not None else None
guesssky = skypar.get((bdxf, bdyf))
guessmbda = (numpy.concatenate([guessmbda, guesssky])
if guessmbda is not None else None)
tflux, tmodel, tmsky = fit_once(
im[sall]-sky[sall], xa[mbda]-bdxaf, ya[mbda]-bdyaf, psfsbda,
psfderiv=tpsfderiv, weight=weightbda, guess=guessmbda,
nskyx=nskyx, nskyy=nskyy)
model[spri] = tmodel[sfit]
msky[spri] = tmsky[sfit]
ind = numpy.flatnonzero(mbd)
ind2 = numpy.flatnonzero(mbd[mbda])
for i in range(repeat):
flux[ind*repeat+i] = tflux[0][ind2*repeat+i]
skypar[(bdxf, bdyf)] = flux[numpy.sum(mbda)*repeat:]
for i in range(repeat):
if len(ind2) == 0:
continue
psfs[i][mbd] = [psfmod.central_stamp(psfsbda[i][tind], minsz)
for tind in ind2]
# try to free memory! Not sure where the circular reference
# could be, but this makes a factor of a few difference
# in peak memory usage on fields with lots of stars with
# large models...
del psfsbda
import gc
gc.collect()
centroids = compute_centroids(xa, ya, psfs, flux, im-(sky+msky),
im-model-sky,
weight, derivcentroids=derivcentroids)
xcen, ycen, stamps = centroids
if titer == lastiter:
stats = compute_stats(xa- | numpy.round(xa) | numpy.round |
from torch.utils.data import Dataset
import torch
import numpy as np
import random
import torch.nn.functional as F
from torchvision import transforms
from torchvision.transforms.functional import normalize
import PIL
from PIL import Image
import os
from .transform import Compose, RandomCrop, RandomRotation, RandomVerticalFlip,RandomHorizontalFlip
from .preprocess import *
# from .extract_patches import create_patch_idx
class TrainDataset(Dataset):
def __init__(self, imgs,masks,fovs,vessels,patches_idx,mode,args):
self.imgs = imgs
self.masks = masks
self.fovs = fovs
self.vessels = vessels
self.patch_h, self.patch_w = args.train_patch_height, args.train_patch_width
self.patches_idx = patches_idx
self.inside_FOV = args.inside_FOV
self.transforms = None
if mode == "train":
self.transforms = Compose([
# RandomResize([56,72],[56,72]),
RandomCrop((48,48)),
# RandomFlip_LR(prob=0.5),
# RandomFlip_UD(prob=0.5),
# RandomRotate()
RandomRotation(10),
RandomVerticalFlip(),
RandomHorizontalFlip()
])
def __len__(self):
return len(self.patches_idx)
def __getitem__(self, idx):
n, x_center, y_center = self.patches_idx[idx]
data = self.imgs[n,:,y_center-int(self.patch_h/2):y_center+int(self.patch_h/2),x_center-int(self.patch_w/2):x_center+int(self.patch_w/2)]
mask = self.masks[n,:,y_center-int(self.patch_h/2):y_center+int(self.patch_h/2),x_center-int(self.patch_w/2):x_center+int(self.patch_w/2)]
vessel = self.vessels[n, :, y_center - int(self.patch_h / 2):y_center + int(self.patch_h / 2),
x_center - int(self.patch_w / 2):x_center + int(self.patch_w / 2)]
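        # (added note: patch centers are assumed to lie at least patch/2 pixels
        #  inside the image; otherwise the slices above would silently clip.
        #  The patch index generator is expected to enforce that margin.)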
if self.transforms:
data = Image.fromarray((np.squeeze(data)*255.).astype(np.uint8))
mask = Image.fromarray((np.squeeze(mask)*255.).astype(np.uint8))
vessel = Image.fromarray((np.squeeze(vessel)*255.).astype(np.uint8))
data, mask, vessel = self.transforms([data, mask, vessel])
data = np.expand_dims(np.array(data),0)
mask = np.expand_dims(np.array(mask),0)
vessel = np.expand_dims(np.array(vessel),0)
data = data / 255.
mask = mask / 255.
vessel = vessel / 255.
data = torch.from_numpy(data).float()
mask = torch.from_numpy(mask).long()
vessel = torch.from_numpy(vessel).long()
# if self.transforms:
# data, mask = self.transforms(data, mask)
return data, mask.squeeze(0), vessel.squeeze(0)
def data_load(file_path):
img_list = []
gt_list = []
fov_list = []
vessel_list = []
with open(file_path, 'r') as file_to_read:
while True:
lines = file_to_read.readline().strip() # read a line
if not lines:
break
img, gt, fov, vessel = lines.split(' ')
img_list.append(img)
gt_list.append(gt)
fov_list.append(fov)
vessel_list.append(vessel)
imgs = None
groundTruth = None
FOVs = None
VS = None
for i in range(len(img_list)):
img = np.asarray(PIL.Image.open(img_list[i])) #0-255
gt = np.asarray( PIL.Image.open(gt_list[i])) # 0,1,2,3,4,5
vs = np.asarray( PIL.Image.open(vessel_list[i])) # 0,255
# import pdb
# pdb.set_trace()
if len(gt.shape) == 3:
gt = gt[:, :, 0]
if len(vs.shape) == 3:
vs = vs[:, :, 0]
fov = np.asarray(PIL.Image.open(fov_list[i])) # 0,255
if len(fov.shape) == 3:
fov = fov[:, :, 0]
imgs = np.expand_dims(img, 0) if imgs is None else np.concatenate((imgs, | np.expand_dims(img, 0) | numpy.expand_dims |
from typing import Tuple, List, Optional
import numpy as np
from numpy import ndarray
from anml.models.interface import TrimmingCompatibleModel
from anml.parameter.parameter import Parameter
from sfma.data import Data
from sfma.models.utils import build_linear_constraint, log_erfc
from scipy.special import erfc
class MarginalModel(TrimmingCompatibleModel):
"""Marginal model for stochastic frontier.
"""
def __init__(self, params: List[Parameter]):
super().__init__()
self._w = None
if not all([isinstance(param, Parameter) for param in params]):
raise TypeError("params must be a list of Parameter.")
param_names = [param.param_name for param in params]
if "eta" not in param_names:
raise ValueError("MarginalModel requires parameter eta.")
if "gamma" not in param_names:
raise ValueError("MarginalModel requires parameter gamma.")
if not any(["beta" in x for x in param_names]):
raise ValueError("MarginalModel requires parameter beta.")
self.params = {
param.param_name: param
for param in params
}
# extract constraints information
self.lb = np.hstack([self.params[name].lb_fe for name in param_names])
self.ub = np.hstack([self.params[name].ub_fe for name in param_names])
self.C, self.c_lb, self.c_ub = build_linear_constraint([
(self.params[name].constr_matrix_fe,
self.params[name].constr_lb_fe,
self.params[name].constr_ub_fe)
for name in param_names
])
@property
def beta_names(self) -> List[str]:
betas = []
        for key, val in self.params.items():
if "beta" in key:
betas.append(key)
return betas
@property
def fevar_size(self) -> int:
num_fe = 0
for beta in self.beta_names:
num_fe += self.params[beta].num_fe
return num_fe
@property
def revar_size(self) -> int:
return self.params["gamma"].num_fe
@property
def ievar_size(self) -> int:
return self.params["eta"].num_fe
@property
def var_sizes(self) -> List[int]:
return [self.fevar_size, self.revar_size, self.ievar_size]
@property
def var_size(self) -> int:
return sum(self.var_sizes)
@property
def femat(self) -> ndarray:
mats = []
for beta in self.beta_names:
mats.append(self.params[beta].design_matrix_fe)
return np.hstack(mats)
@property
def remat(self) -> ndarray:
return self.params["gamma"].design_matrix_fe
@property
def iemat(self) -> ndarray:
return self.params["eta"].design_matrix_fe
def get_vars(self, x: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
variables = np.split(x, np.cumsum([self.var_sizes])[:-1])
beta = variables[0]
gamma = np.sqrt(variables[1]**2)
eta = np.sqrt(variables[2]**2)
return beta, gamma, eta
@property
def w(self):
return self._w
@w.setter
def w(self, weights: np.ndarray):
        if np.any((weights < 0.) | (weights > 1.)):
raise ValueError("Weights are not between 0 and 1.")
self._w = weights
# pylint:disable=unbalanced-tuple-unpacking
def _objective(self, x: ndarray, data: Data) -> ndarray:
"""
Objective function
"""
beta, gamma, eta = self.get_vars(x)
r = data.obs - self.femat.dot(beta)
v_re = np.sum(self.remat**2*gamma, axis=1)
v_ie = np.sum(self.iemat**2*eta, axis=1)
v = data.obs_var + v_re + v_ie
z = np.sqrt(v_ie)*r/np.sqrt(2.0*v*(data.obs_var + v_re))
return 0.5 * r ** 2 / v + 0.5 * np.log(v) - log_erfc(z)
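    # (added note, a sketch of the expression above: marginalising a
    #  half-normal inefficiency out of a normal noise model yields a
    #  skew-normal marginal, whose negative log-density is the quadratic and
    #  log-variance terms plus the -log_erfc skewness correction; as eta -> 0
    #  the objective collapses to ordinary least squares.)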
def objective(self, x: ndarray, data: Data) -> float:
obj = self._objective(x=x, data=data)
if self.w is not None:
obj = self.w.dot(obj)
return np.mean(obj)
def _gradient(self, x: ndarray, data: Data) -> ndarray:
beta, gamma, eta = self.get_vars(x)
r = data.obs - self.femat.dot(beta)
v_re = np.sum(self.remat ** 2 * gamma, axis=1)
v_ie = np.sum(self.iemat ** 2 * eta, axis=1)
v_roe = data.obs_var + v_re
v = data.obs_var + v_re + v_ie
z = | np.sqrt(v_ie) | numpy.sqrt |
# Copyright 2021 The Commplax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import pandas as pd
from scipy import signal, special
from commplax import op
import matplotlib.pyplot as plt
import quantumrandom
def randpam(s, n, p=None):
a = np.linspace(-s+1, s-1, s)
return np.random.choice(a, n, p=p) + 1j * np.random.choice(a, n, p=p)
def randqam(s, n, p=None):
m = np.int(np.sqrt(s))
a = np.linspace(-m+1, m-1, m, dtype=np.float64)
return np.random.choice(a, n, p=p) + 1j * np.random.choice(a, n, p=p)
def grayenc_int(x):
x = np.asarray(x, dtype=int)
return x ^ (x >> 1)
def graydec_int(x):
x = np.atleast_1d(np.asarray(x, dtype=int))
mask = np.array(x)
while mask.any():
I = mask > 0
mask[I] >>= 1
x[I] ^= mask[I]
return x
def qamgrayenc_int(x, L):
"""
<NAME>., <NAME>., <NAME>. and <NAME>., 2001.
Constellation labeling for linear encoders. IEEE Transactions
on Information Theory, 47(6), pp.2417-2431.
"""
x = np.asarray(x, dtype=int)
M = int(np.sqrt(L))
B = int(np.log2(M))
x1 = x // M
x2 = x % M
return (grayenc_int(x1) << B) + grayenc_int(x2)
def qamgraydec_int(x, L):
x = np.asarray(x, dtype=int)
M = int(np.sqrt(L))
B = int(np.log2(M))
x1 = graydec_int(x >> B)
x2 = graydec_int(x % (1 << B))
return x1 * M + x2
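# A minimal self-check (added sketch; the `_demo_` helper is not part of the
# original module): the 2-D gray mapping should round-trip every 16-QAM index.
def _demo_gray_roundtrip():
    x = np.arange(16)
    assert np.array_equal(qamgraydec_int(qamgrayenc_int(x, 16), 16), x)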
def pamdecision(x, L):
x = np.asarray(x)
y = np.atleast_1d((np.round(x / 2 + 0.5) - 0.5) * 2).astype(int)
# apply bounds
bd = L - 1
y[y > bd] = bd
y[y < -bd] = -bd
return y
def qamdecision(x, L):
x = np.atleast_1d(x)
M = int(np.sqrt(L))
if any(np.iscomplex(x)):
I = pamdecision(np.real(x), M)
Q = pamdecision(np.imag(x), M)
y = I + 1j*Q
else: # is tuple
I = pamdecision(x[0], M)
Q = pamdecision(x[1], M)
y = (I, Q)
return y
def qammod(x, L):
x = np.asarray(x, dtype=int)
M = int(np.sqrt(L))
A = np.linspace(-M+1, M-1, M, dtype=np.float64)
C = A[None,:] + 1j*A[::-1, None]
d = qamgraydec_int(x, L)
return C[d // M, d % M]
def qamdemod(x, L):
x = np.asarray(x)
M = int(np.sqrt(L))
x = qamdecision(x, L)
c = ((np.real(x) + M - 1) // 2).astype(int)
r = ((M - 1 - np.imag(x)) // 2).astype(int)
d = qamgrayenc_int(r * M + c, L)
return d
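# A minimal self-check (added sketch, not in the original module): modulating
# and then demodulating every 16-QAM symbol index should be the identity.
def _demo_qam_roundtrip():
    d = np.arange(16)
    assert np.array_equal(qamdemod(qammod(d, 16), 16), d)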
def int2bit(d, M):
    M = int(M)
d = np.atleast_1d(d).astype(np.uint8)
b = np.unpackbits(d[:,None], axis=1)[:,-M:]
return b
def bit2int(b, M):
b = np.asarray(b, dtype=np.uint8)
d = np.packbits(np.pad(b.reshape((-1,M)), ((0,0),(8-M,0))))
return d
def grayqamplot(L):
M = int(np.sqrt(L))
x = range(L)
y = qammod(x, L)
fstr = "{:0" + str(M) + "b}"
I = np.real(y)
Q = | np.imag(y) | numpy.imag |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 13:28:53 2018
@author: badat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from sklearn.metrics import average_precision_score,f1_score,precision_score,recall_score
import pandas as pd
import pdb
#%%
def get_compress_type(file_name):
compression_type = ''
if 'ZLIB' in file_name:
compression_type = 'ZLIB'
elif 'GZIP' in file_name:
compression_type = 'GZIP'
return compression_type
def compute_AP(predictions,labels):
num_class = predictions.shape[1]
ap=np.zeros(num_class)
ap_pascal = np.zeros(num_class)
for idx_cls in range(num_class):
prediction = np.squeeze(predictions[:,idx_cls])
label = np.squeeze(labels[:,idx_cls])
# mask = np.abs(label)==1
# if np.sum(label>0)==0:
# continue
binary_label=np.clip(label,0,1)
ap[idx_cls]=average_precision_score(binary_label,prediction)#average_precision_score(binary_label,prediction[mask])
ap_pascal[idx_cls]=calc_pr_ovr_noref(binary_label,prediction)[-1]
return ap,ap_pascal
def evaluate(iterator,tensors,features,logits,sess,is_train=None):
if is_train is not None:
print('switch to inference model')
sess.run(is_train.assign(False))
sess.run(iterator.initializer)
predictions = []
labels = []
while True:
try:
img_ids_v,features_v,labels_v = sess.run(tensors)
feed_dict = {features:features_v}
logits_v = sess.run(logits, feed_dict)
predictions.append(logits_v)
labels.append(labels_v)
except tf.errors.OutOfRangeError:
print('end')
break
predictions = np.concatenate(predictions)
labels = np.concatenate(labels)
assert predictions.shape==labels.shape,'invalid shape'
if is_train is not None:
sess.run(is_train.assign(True))
return compute_AP(predictions,labels)
def evaluate_latent_noise(iterator,tensors,features,logits,sess,is_train=None):
if is_train is not None:
print('switch to inference model')
sess.run(is_train.assign(False))
sess.run(iterator.initializer)
v_predictions = []
h_predictions = []
labels = []
while True:
try:
img_ids_v,features_v,labels_v = sess.run(tensors)
feed_dict = {features:features_v}
logits_v = sess.run(logits, feed_dict)
v_predictions.append(logits_v[0])
h_predictions.append(logits_v[1])
labels.append(labels_v)
except tf.errors.OutOfRangeError:
print('end')
break
v_predictions = np.concatenate(v_predictions)
h_predictions = np.concatenate(h_predictions)
labels = np.concatenate(labels)
assert v_predictions.shape==labels.shape,'invalid shape'
if is_train is not None:
sess.run(is_train.assign(True))
ap_v,ap_v_pascal = compute_AP(v_predictions,labels)
ap_h,ap_h_pascal = compute_AP(h_predictions,labels)
return ap_v,ap_v_pascal,ap_h,ap_h_pascal
#%%
def calc_pr_ovr_noref(counts, out):
"""
[P, R, score, ap] = calc_pr_ovr(counts, out, K)
Input :
counts : number of occurrences of this word in the ith image
out : score for this image
K : number of references
Output :
P, R : precision and recall
score : score which corresponds to the particular precision and recall
ap : average precision
"""
#binarize counts
counts = np.array(counts > 0, dtype=np.float32);
tog = np.hstack((counts[:,np.newaxis].astype(np.float64), out[:, np.newaxis].astype(np.float64)))
ind = np.argsort(out)
ind = ind[::-1]
score = np.array([tog[i,1] for i in ind])
sortcounts = np.array([tog[i,0] for i in ind])
tp = sortcounts;
fp = sortcounts.copy();
for i in range(sortcounts.shape[0]):
if sortcounts[i] >= 1:
fp[i] = 0.;
elif sortcounts[i] < 1:
fp[i] = 1.;
P = np.cumsum(tp)/(np.cumsum(tp) + np.cumsum(fp));
numinst = np.sum(counts);
R = np.cumsum(tp)/numinst
ap = voc_ap(R,P)
return P, R, score, ap
def voc_ap(rec, prec):
"""
ap = voc_ap(rec, prec)
Computes the AP under the precision recall curve.
"""
rec = rec.reshape(rec.size,1); prec = prec.reshape(prec.size,1)
z = np.zeros((1,1)); o = np.ones((1,1));
mrec = np.vstack((z, rec, o))
mpre = np.vstack((z, prec, z))
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
I = np.where(mrec[1:] != mrec[0:-1])[0]+1;
ap = 0;
for i in I:
ap = ap + (mrec[i] - mrec[i-1])*mpre[i];
return ap
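# A minimal worked example (added sketch; the two-point PR curve is made up,
# and the `_demo_` helper is not part of the original module): after monotone
# interpolation the curve has prec=1 up to rec=0.5 and prec=0.5 up to rec=1,
# so the area is 0.5*1.0 + 0.5*0.5 = 0.75.
def _demo_voc_ap():
    rec = np.array([0.5, 1.0])
    prec = np.array([1.0, 0.5])
    assert abs(voc_ap(rec, prec) - 0.75) < 1e-6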
#%%
def count_records(file_name):
c = 0
for record in tf.python_io.tf_record_iterator(file_name):
c += 1
return c
def preprocessing_graph(G):
np.fill_diagonal(G,1)
for idx_col in range(G.shape[1]):
normalizer = np.sum(G[:,idx_col])
G[:,idx_col] = G[:,idx_col]*1.0/normalizer
return G
def generate_missing_signature(attributes,fractions):
n_attr = attributes.shape[1]
n_sample = attributes.shape[0]
Masks = np.zeros((attributes.shape[0],attributes.shape[1],len(fractions)),dtype=np.float32)
for idx_f in range(len(fractions)):
select_fraction = fractions[idx_f]
for idx_a in range(n_attr):
sub_l = np.zeros(n_sample)
attr = attributes[:,idx_a]
pos_idx=np.where(attr>0)[0]
n_pos =len(pos_idx)
if n_pos > 0:
n_sub_pos = max(int(n_pos*select_fraction),1)
sub_pos_idx = np.random.choice(pos_idx,n_sub_pos,False)
sub_l[sub_pos_idx]=attr[sub_pos_idx]
neg_idx=np.where(attr<0)[0]
n_neg = len(neg_idx)
if n_neg > 0:
n_sub_neg = max(int(n_neg*select_fraction),1)
sub_neg_idx = np.random.choice(neg_idx,n_sub_neg,False)
sub_l[sub_neg_idx]=attr[sub_neg_idx]
Masks[:,idx_a,idx_f]=sub_l
return Masks
#%% label mapping function
def LoadLabelMap(attr_name_file, class_name_file):
attr_name = []
class_name = []
with open(attr_name_file,"r") as f:
lines=f.readlines()
for line in lines:
idx,name=line.rstrip('\n').split(' ')
attr_name.append(name)
with open(class_name_file,"r") as f:
lines=f.readlines()
for line in lines:
idx,name=line.rstrip('\n').split(' ')
class_name.append(name)
return attr_name,class_name
def LoadClassSignature(class_signature_file):
signatures = []
with open(class_signature_file,"r") as f:
lines=f.readlines()
for line in lines:
attrs=line.rstrip('\n').split(' ')
signatures.append(attrs)
return np.array(signatures).astype(np.float32)/100
def quantizeSignature_mean(signatures):
signatures_q = np.ones(signatures.shape)*-1
signatures_m = np.mean(signatures,axis=0)
signatures_s_m = signatures-signatures_m[np.newaxis,:]
signatures_q[signatures_s_m>=0]=1
signatures_q[signatures_s_m<0]=0
return signatures_q
def quantizeSignature(signatures):
signatures_q = np.ones(signatures.shape)*-1
signatures_q[signatures>=0.5]=1
signatures_q[signatures<0.5]=0
return signatures_q
def quantizeSignature_0(signatures):
signatures_q = np.ones(signatures.shape)*-1
signatures_q[signatures>0]=1
signatures_q[signatures<=0]=0
return signatures_q
def DAP(sigmoid_Predictions,signatures_q,signatures):
n = sigmoid_Predictions.shape[0]
T = signatures_q[:,:,np.newaxis]*np.ones((1,1,n))
prior = np.mean(signatures_q,0)
# eliminate degenerative prior
prior[prior==0]=0.5
prior[prior==1]=0.5
#
clss_prior = np.multiply(signatures_q,prior)+np.multiply(1-signatures_q,1-prior)
log_clss_prior = np.sum(np.log(clss_prior),1)
#
P_T = sigmoid_Predictions[:,:,np.newaxis].T
Inter = np.multiply(T,P_T)+np.multiply(1-T,1-P_T)
Score=np.sum(np.log(Inter),axis=1)
# calibrate prior
Score_calibrate = Score - log_clss_prior[:,np.newaxis]
return Score_calibrate
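# (added note, a sketch of the calibration above: the score is the attribute
#  log-likelihood of each class signature under the per-attribute sigmoid
#  outputs; subtracting log_clss_prior divides out the empirical attribute
#  prior, as in the standard DAP posterior p(z|x) ~ prod_a p(a|x)/p(a).)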
#def DAP_sum(Predictions,signatures_q,signatures):
# n = Predictions.shape[0]
# T = signatures_q[:,:,np.newaxis]*np.ones((1,1,n))
# prior = np.mean(signatures_q,0)
# # eliminate degenerative prior
# prior[prior==0]=0.5
# prior[prior==1]=0.5
# #
# clss_prior = np.multiply(signatures_q,prior)+np.multiply(1-signatures_q,1-prior)
# clss_prior = np.sum(clss_prior,1)
# #
# P_T = Predictions[:,:,np.newaxis].T
# Inter = np.multiply(T,P_T)+np.multiply(1-T,1-P_T)
# Score=np.sum(Inter,axis=1)
# # calibrate prior
# Score_calibrate = Score / clss_prior[:,np.newaxis]
#
# return Score_calibrate
def mean_acc(pred,true):
clss = np.unique(true)
acc = np.ones(len(clss))*-1
for idx_c,c in enumerate(clss):
pred_clss = pred[true==c]
acc[idx_c] = np.sum(pred_clss==c)*1.0/len(pred_clss)
return acc
def zeroshot_evaluation(Score_calibrate,t_labels,seen,unseen,mode = 'mean_acc'):
t_labels = np.squeeze(t_labels)
seen_tst_set = np.array([idx_s for idx_s in range(len(t_labels)) if t_labels[idx_s] in seen])
unseen_tst_set = np.array([idx_s for idx_s in range(len(t_labels)) if t_labels[idx_s] in unseen])
unconsider_class = np.array([idx_c for idx_c in range(Score_calibrate.shape[1]) if (idx_c not in seen) and (idx_c not in unseen)])
acc_u_u=-1
acc_u_a=-1
acc_s_s=-1
acc_s_a=-1
# for idx_c in unseen:
# print(np.sum(t_labels==idx_c))
# pdb.set_trace()
if len(unconsider_class)>0:
print('-'*30)
print('detect unconsider class: ',unconsider_class.shape)
print('-'*30)
Score_calibrate[:,unconsider_class] = -1000
if len(unseen_tst_set)>0:
#u->a
Score_calibrate_u_a = Score_calibrate[unseen_tst_set,:]
clss_u_a = np.argmax(Score_calibrate_u_a,1)
if mode == 'mean_acc':
acc_u_a = np.mean(mean_acc(clss_u_a,t_labels[unseen_tst_set]))#
else:
acc_u_a = np.sum((clss_u_a-t_labels[unseen_tst_set])==0)*1.0/len(unseen_tst_set)
#u->u
Score_calibrate_u_u = Score_calibrate_u_a[:,unseen]
clss_u_u = np.argmax(Score_calibrate_u_u,1)
clss_u_u=np.array([unseen[l] for l in clss_u_u])
if mode == 'mean_acc':
acc_u_u = np.mean(mean_acc(clss_u_u,t_labels[unseen_tst_set]))##
else:
acc_u_u = np.sum((clss_u_u-t_labels[unseen_tst_set])==0)*1.0/len(unseen_tst_set)
if len(seen_tst_set)>0:
#s->a
Score_calibrate_s_a = Score_calibrate[seen_tst_set,:]
clss_s_a = np.argmax(Score_calibrate_s_a,1)
if mode == 'mean_acc':
acc_s_a = np.mean(mean_acc(clss_s_a,t_labels[seen_tst_set]))#
else:
acc_s_a = np.sum((clss_s_a-t_labels[seen_tst_set])==0)*1.0/len(seen_tst_set)
#s->s
Score_calibrate_s_s = Score_calibrate_s_a[:,seen]
clss_s_s = np.argmax(Score_calibrate_s_s,1)
clss_s_s=np.array([seen[l] for l in clss_s_s])
if mode == 'mean_acc':
acc_s_s = np.mean(mean_acc(clss_s_s,t_labels[seen_tst_set]))#
else:
acc_s_s =np.sum((clss_s_s-t_labels[seen_tst_set])==0)*1.0/len(seen_tst_set)
return acc_u_u,acc_u_a,acc_s_s,acc_s_a
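# A hedged usage sketch for zeroshot_evaluation() (the seen/unseen index
# lists are placeholders). Note the indexing above treats the score matrix
# as (n_samples, n_classes), so DAP()'s (n_classes, n_samples) output would
# need a transpose first -- an assumption inferred from the code:
# acc_u_u, acc_u_a, acc_s_s, acc_s_a = zeroshot_evaluation(
#     Score_calibrate.T, t_labels, seen=[0, 1], unseen=[2, 3])
# H = 2 * acc_s_a * acc_u_a / (acc_s_a + acc_u_a)  # common GZSL summary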
#%%
def signature_completion(Label_completion_v,sparse_dict_label_v,signature_q,quantization):
unique_labels = np.unique(sparse_dict_label_v)
signature_comp=np.zeros(signature_q.shape)
for l in unique_labels:
mask_l = sparse_dict_label_v == l
signature_comp[l,:]=np.mean(Label_completion_v[mask_l,:],0)
if quantization:
        raise NotImplementedError('quantized signature completion not implemented')
# signature_comp[signature_comp>0]=1
# signature_comp[signature_comp<0]=-1
return signature_comp
def evaluate_completion(signature_comp,signature_q,quantization):
mask_comp = np.sum(np.abs(signature_comp),1)!=0
if quantization:
        return np.sum((signature_comp!=signature_q)[mask_comp,:],1)
#!/user/bin/env python
# -*- coding:utf-8 -*-
import cv2
import numpy as np
def gaussianFilter(img):
height, width = img.shape
    # 1. Pad the image: the padded image has size (2M x 2N)
    p = 2 * height
    q = 2 * width
    fillimg = np.zeros((p, q), np.float64)
    midimg = np.zeros((p, q), np.float64)
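    # A minimal sketch of the remaining steps for a frequency-domain
    # Gaussian low-pass (the cutoff d0, the (-1)^(x+y) centering trick and
    # the uint8 return are assumptions, not from the original code):
    fillimg[:height, :width] = img
    signs = (-1.0) ** (np.arange(p)[:, None] + np.arange(q)[None, :])
    F = np.fft.fft2(fillimg * signs)            # 2. center, then DFT
    u, v = np.meshgrid(np.arange(q) - q // 2, np.arange(p) - p // 2)
    d0 = 30.0                                   # assumed cutoff frequency
    H = np.exp(-(u ** 2 + v ** 2) / (2.0 * d0 ** 2))
    out = np.real(np.fft.ifft2(F * H)) * signs  # 3. filter, inverse DFT
    return np.clip(out[:height, :width], 0, 255).astype(np.uint8)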
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
from deap import gp
import numpy as np
import random
from operator import attrgetter
import parameters
import sys
# # =============================================================================
# # Fixed parameters
# # =============================================================================
RANDOM_SEED = parameters.RANDOM_SEED
POPULATION_SIZE = parameters.POPULATION_SIZE
MAX_TIME_HORIZON = parameters.MAX_TIME_HORIZON
MUTATION_RATE = parameters.MUTATION_RATE
MAX_GENERATIONS = parameters.MAX_GENERATIONS
CROSSOVER_RATE = parameters.CROSSOVER_RATE
MIN_TIME_HORIZON = parameters.MIN_TIME_HORIZON
INITIAL_PRICE = parameters.INITIAL_PRICE
TOURNAMENT_SIZE = parameters.TOURNAMENT_SIZE
INITIAL_ASSETS = parameters.INITIAL_ASSETS
INITIAL_CASH = parameters.INITIAL_CASH
MIN_VALUATION = parameters.MIN_VALUATION
MAX_VALUATION = parameters.MAX_VALUATION
PROBA_TF = parameters.PROBA_TF
PROBA_VI = parameters.PROBA_VI
toolbox = base.Toolbox()
# Create the fitness object
creator.create("fitness_strategy", base.Fitness, weights=(1.0,))
# Create the individual object
creator.create("individual", list, typecode="d", fitness=creator.fitness_strategy)
# Create the individual list
toolbox.register(
"generate_tf_strategy", random.randint, MIN_TIME_HORIZON, MAX_TIME_HORIZON
)
toolbox.register("generate_wealth", random.randint, 0, 0)
toolbox.register("generate_cash", random.randint, INITIAL_CASH, INITIAL_CASH)
toolbox.register("generate_asset", random.randint, INITIAL_ASSETS, INITIAL_ASSETS)
toolbox.register("generate_loan", random.randint, 0, 0)
toolbox.register("generate_trading_signal", random.randint, 0, 0)
toolbox.register("generate_excess_demand", random.randint, 0, 0)
toolbox.register("generate_profit", random.randint, 0, 0)
toolbox.register("generate_ema", random.randint, 0, 0)
toolbox.register("generate_margin", random.randint, 0, 0)
toolbox.register(
"generate_tf_individual",
tools.initCycle,
creator.individual,
(
toolbox.generate_tf_strategy,
toolbox.generate_wealth,
toolbox.generate_cash,
toolbox.generate_asset,
toolbox.generate_loan,
toolbox.generate_trading_signal,
toolbox.generate_excess_demand,
toolbox.generate_profit,
toolbox.generate_ema,
toolbox.generate_margin,
),
n=1,
)
toolbox.register(
"tf_population_creation", tools.initRepeat, list, toolbox.generate_tf_individual
)
toolbox.register(
"generate_tf_individual2",
tools.initCycle,
creator.individual,
(toolbox.generate_tf_strategy),
n=1,
)
toolbox.register(
"tf_population_creation2", tools.initRepeat, list, toolbox.generate_tf_individual2
)
toolbox.register("generate_no_asset", random.randint, 0, 0)
toolbox.register(
"generate_hyper_tf_individual",
tools.initCycle,
creator.individual,
(
toolbox.generate_tf_strategy,
toolbox.generate_wealth,
toolbox.generate_cash,
toolbox.generate_no_asset,
toolbox.generate_loan,
toolbox.generate_trading_signal,
toolbox.generate_excess_demand,
toolbox.generate_profit,
toolbox.generate_ema,
toolbox.generate_margin,
),
n=1,
)
toolbox.register(
"generate_hyper_tf_individual2",
tools.initRepeat,
creator.individual,
toolbox.generate_tf_strategy,
n=1,
)
toolbox.register("generate_vi_strategy", random.randint, MIN_VALUATION, MAX_VALUATION)
toolbox.register(
"generate_vi_individual",
tools.initCycle,
creator.individual,
(
toolbox.generate_vi_strategy,
toolbox.generate_wealth,
toolbox.generate_cash,
toolbox.generate_asset,
toolbox.generate_loan,
toolbox.generate_trading_signal,
toolbox.generate_excess_demand,
toolbox.generate_profit,
toolbox.generate_ema,
toolbox.generate_margin,
),
n=1,
)
toolbox.register(
"vi_population_creation", tools.initRepeat, list, toolbox.generate_vi_individual
)
toolbox.register(
"generate_vi_individual2",
tools.initCycle,
creator.individual,
(toolbox.generate_vi_strategy),
n=1,
)
toolbox.register(
"vi_population_creation2", tools.initRepeat, list, toolbox.generate_vi_individual2
)
toolbox.register(
"generate_hyper_vi_individual",
tools.initCycle,
creator.individual,
(
toolbox.generate_vi_strategy,
toolbox.generate_wealth,
toolbox.generate_cash,
toolbox.generate_no_asset,
toolbox.generate_loan,
toolbox.generate_trading_signal,
toolbox.generate_excess_demand,
toolbox.generate_profit,
toolbox.generate_ema,
toolbox.generate_margin,
),
n=1,
)
toolbox.register(
"generate_hyper_vi_individual2",
tools.initRepeat,
creator.individual,
toolbox.generate_vi_strategy,
n=1,
)
def determine_mixed_strategy(PROBA_TF, PROBA_VI):
global types
rd = random.random()
if rd <= PROBA_TF:
types = np.vstack((types, "TF"))
return toolbox.generate_tf_strategy()
elif rd > PROBA_TF and rd <= PROBA_TF + PROBA_VI:
types = np.vstack((types, "VI"))
return toolbox.generate_vi_strategy()
toolbox.register("generate_mix_strategy", determine_mixed_strategy, PROBA_TF, PROBA_VI)
def create_mixed_population(POPULATION_SIZE, PROBA_TF, PROBA_VI):
global types
types = np.array(["NA"])
def determine_mixed_strategy(PROBA_TF, PROBA_VI):
global types
rd = random.random()
if rd <= PROBA_TF:
types = np.vstack((types, "TF"))
return toolbox.generate_tf_strategy()
elif rd > PROBA_TF and rd <= PROBA_TF + PROBA_VI:
types = np.vstack((types, "VI"))
return toolbox.generate_vi_strategy()
toolbox.register(
"generate_mix_strategy", determine_mixed_strategy, PROBA_TF, PROBA_VI
)
# toolbox.register("generate_mix_individual", tools.initCycle, creator.individual,
# (toolbox.generate_mix_strategy, toolbox.generate_wealth,
# toolbox.generate_cash, toolbox.generate_asset,
# toolbox.generate_loan, toolbox.generate_trading_signal,
# toolbox.generate_excess_demand,toolbox.generate_profit,
# toolbox.generate_ema, toolbox.generate_margin), n=1)
toolbox.register(
"generate_mix_individual",
tools.initRepeat,
creator.individual,
toolbox.generate_mix_strategy,
n=1,
)
toolbox.register(
"mix_population_creation",
tools.initRepeat,
list,
toolbox.generate_mix_individual,
)
pop = toolbox.mix_population_creation(n=POPULATION_SIZE)
return pop, types
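# A minimal usage sketch for create_mixed_population() (wrapped in a helper
# with a hypothetical name so nothing runs at import time; note that the
# returned `types` array keeps the initial "NA" placeholder row from its
# construction above):
def _mixed_population_demo():
    pop, types = create_mixed_population(POPULATION_SIZE, PROBA_TF, PROBA_VI)
    strategies = [ind[0] for ind in pop]   # strategy value sits at index 0
    return strategies, types[1:]           # drop the "NA" placeholder row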
toolbox.register(
"generate_hyper_mix_individual",
tools.initCycle,
creator.individual,
(
toolbox.generate_mix_strategy,
toolbox.generate_wealth,
toolbox.generate_cash,
toolbox.generate_no_asset,
toolbox.generate_loan,
toolbox.generate_trading_signal,
toolbox.generate_excess_demand,
toolbox.generate_profit,
toolbox.generate_ema,
toolbox.generate_margin,
),
n=1,
)
""" TO REMOVE """
# Fitness definition
def ema_fitness(individual):
return (individual[8],)
toolbox.register("evaluate", ema_fitness)
# Fitness definition
def ema_fitness2(balance_sheet, index_list):
# print("ema_fitnss2")
fit_vec = []
for i in index_list:
fit_vec.append(balance_sheet[i, 7])
# print(fit_vec)
# return balance_sheet[index_list,7]
return (fit_vec,)
toolbox.register("evaluate2", ema_fitness2)
def set_fitness(population, balance_sheet):
    for i in range(len(population)):
        # assign a one-element tuple, as DEAP fitness values expect
        population[i].fitness.values = (balance_sheet[i, 7],)
def fitness_for_all(population, balance_sheet):
    index_list = list(range(len(population)))
    # ema_fitness2 returns a 1-tuple wrapping the list of fitness values,
    # so unwrap it before assigning one value per individual.
    (freshFitnessValues,) = ema_fitness2(balance_sheet, index_list)
    for individual, fitnessValue in zip(population, freshFitnessValues):
        individual.fitness.values = (fitnessValue,)
    return list(freshFitnessValues)
# Function to recompute fitness of invalid individuals
def fitness_for_invalid(offspring):
freshIndividuals = [ind for ind in offspring if not ind.fitness.valid]
# print(freshIndividuals)
# print(type(freshIndividuals))
freshFitnessValues = list(map(toolbox.evaluate, freshIndividuals))
# print(freshFitnessValues)
for individual, fitnessValue in zip(freshIndividuals, freshFitnessValues):
individual.fitness.values = fitnessValue
# Function to recompute fitness of invalid individuals using balance sheet
def fitness_for_invalid2(population, balance_sheet):
    invalid_index_list = [i for i in range(len(population))
                          if not population[i].fitness.valid]
    freshIndividuals = [ind for ind in population if not ind.fitness.valid]
    # Unwrap the 1-tuple returned by ema_fitness2 (same fix as above).
    (freshFitnessValues,) = ema_fitness2(balance_sheet, invalid_index_list)
    for individual, fitnessValue in zip(freshIndividuals, freshFitnessValues):
        individual.fitness.values = (fitnessValue,)
# Creating our own crossover operator:
def feasible_crossover(ind1, ind2, CROSSOVER_RATE):
if random.random() < CROSSOVER_RATE:
upperb = max(ind1, ind2)[0]
lowerb = min(ind1, ind2)[0]
ind1[0] = random.randint(lowerb, upperb)
ind2[0] = random.randint(lowerb, upperb)
return ind1[0], ind2[0]
toolbox.register("feasible_crossover", feasible_crossover)
toolbox.register("mate", toolbox.feasible_crossover)
# Creating our own mutation operator
def mutate_both_ways(ind):
if random.random() < 0.5:
ind[0] -= 1
else:
ind[0] += 1
def feasible_mutation(ind, MUTATION_RATE):
if random.random() < MUTATION_RATE:
if ind[0] == MAX_TIME_HORIZON: # we can only mutate lower
ind[0] -= 1
elif ind[0] == 1: # we can only mutate higher
ind[0] += 1
else:
mutate_both_ways(ind) # we can mutate lower or higher
return ind
toolbox.register("feasible_mutation", feasible_mutation)
toolbox.register("mutate", toolbox.feasible_mutation)
def random_decimal(low, high):
# number = float(random.randint(low*1000, high*1000))/1000
global number
if low >= 0 and high >= 0:
number = float(random.randint(round(low * 1000), round(high * 1000)) / 1000)
if low < 0 and high < 0:
number = -float(random.randint(round(-low * 1000), round(-high * 1000)) / 1000)
return number
def selRandom(individuals, k):
return [random.choice(individuals) for i in range(k)]
# Creation of our customised selection operator (tournament) that handles positive & negative fitness values
def selTournament(individuals, k, tournsize, fit_attr="fitness"):
chosen = []
for i in range(k):
chosen_i = []
aspirants = selRandom(individuals, tournsize - 1)
aspirants.append(individuals[i])
chosen_i = max(aspirants, key=attrgetter(fit_attr))
chosen_i[1:10] = individuals[i][1:10]
chosen.append(chosen_i)
return chosen
toolbox.register("selTournament", selTournament)
toolbox.register("select", toolbox.selTournament)
""" TO REMOVE """
# Define the hypermutation (insolvency) parameter
round_replacements = 0
def hypermutate(pop):
pop_temp = list(map(toolbox.clone, pop))
round_replacements = 0
for i in range(0, len(pop_temp)):
# if pop_temp[i][1] + pop_temp[i][9] <= 0:
if pop_temp[i][1] <= 0:
pop_temp[i] = toolbox.generate_hyper_tf_individual()
del pop_temp[i].fitness.values
# global round_replacements
round_replacements += 1
pop[:] = pop_temp
return pop, round_replacements
toolbox.register("hypermutate", hypermutate)
# Define the hypermutation (insolvency) parameter
def hypermutate2(pop_ex, pop_op, types, balance_sheet, mode):
types_temp = np.copy(types)
balance_sheet_temp = np.copy(balance_sheet)
pop_ex_temp = pop_ex.copy()
pop_op_temp = pop_op.copy()
if mode == "extended":
PROBA_TF = parameters.PROBA_TF
PROBA_VI = parameters.PROBA_VI
PROBA_GP = 0
if mode == "open":
PROBA_GP = 1
PROBA_TF = 0
PROBA_VI = 0
if mode == "combined":
PROBA_GP = parameters.PROBA_GP
PROBA_TF = parameters.PROBA_TF
PROBA_VI = parameters.PROBA_VI
round_replacements = 0
""" Hypermutate pop_ex """
for i in range(0, POPULATION_SIZE):
if balance_sheet[i][0] <= 0:
# print("i = " + str(i))
"""Agent is insolvent. Delete"""
if i < len(pop_ex):
# del pop_ex_temp[i].fitness.values
del pop_ex_temp[i].fitness
del pop_ex_temp[i]
elif i >= len(pop_ex):
del pop_op_temp[i - len(pop_ex)].fitness.values
del pop_op_temp[i - len(pop_ex)]
balance_sheet_temp = np.delete(balance_sheet_temp, (i), axis=0)
types_temp = np.delete(types_temp, (i), axis=0)
""" Replace """
ind_bs = np.array([0, INITIAL_CASH, INITIAL_ASSETS, 0, 0, 0, 0, 0, 0])
balance_sheet_temp = np.vstack((balance_sheet_temp, ind_bs))
# Draw the type to choose what pop and type to append
if PROBA_GP == 1:
"""The incoming agent is of type GP"""
                # add = gp.create_population(POPULATION_SIZE, ...)
# pop_op_temp.append(add)
types_temp = np.vstack((types_temp, "GP"))
round_replacements += 1
elif PROBA_TF == 1:
"""The incoming agent is of type TF"""
pop_ex_temp.append(toolbox.generate_hyper_tf_individual2())
types_temp = np.vstack((types_temp, "TF"))
round_replacements += 1
elif PROBA_VI == 1:
"""The incoming agent is of type VI"""
pop_ex_temp.append(toolbox.generate_hyper_vi_individual2())
types_temp = np.vstack((types_temp, "VI"))
round_replacements += 1
else:
rd = random.uniform(0, PROBA_TF + PROBA_VI + PROBA_GP)
if rd <= PROBA_TF:
types_temp = np.vstack((types_temp, "TF"))
round_replacements += 1
pop_ex_temp.append(toolbox.generate_hyper_tf_individual2())
elif rd > PROBA_TF and rd <= PROBA_TF + PROBA_VI:
types_temp = np.vstack((types_temp, "VI"))
round_replacements += 1
pop_ex_temp.append(toolbox.generate_hyper_vi_individual2())
elif rd > PROBA_TF + PROBA_VI:
                    # add = gp.create_population(POPULATION_SIZE, ...)
                    # pop_op_temp.append(add)
                    types_temp = np.vstack((types_temp, "GP"))
import pandas as pd
import numpy as np
df_simple = pd.DataFrame(np.arange(12).reshape(3, 4))
print(df_simple)
# 0 1 2 3
# 0 0 1 2 3
# 1 4 5 6 7
# 2 8 9 10 11
print(df_simple.values)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(type(df_simple.values))
# <class 'numpy.ndarray'>
print(df_simple.columns)
# RangeIndex(start=0, stop=4, step=1)
print(type(df_simple.columns))
# <class 'pandas.core.indexes.range.RangeIndex'>
print(df_simple.index)
# RangeIndex(start=0, stop=3, step=1)
print(type(df_simple.index))
# <class 'pandas.core.indexes.range.RangeIndex'>
print(list(df_simple.columns))
# [0, 1, 2, 3]
print(type(list(df_simple.columns)))
# <class 'list'>
print(df_simple.columns.tolist())
# [0, 1, 2, 3]
print(type(df_simple.columns.tolist()))
# <class 'list'>
df = pd.DataFrame(np.arange(12))
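# Side note (not in the original): since pandas 0.24, DataFrame.to_numpy()
# is the recommended way to get the underlying ndarray, equivalent here to
# the .values attribute used above.
print(df_simple.to_numpy())
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]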
from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.spatial import cKDTree, KDTree, SphericalVoronoi, distance
except ImportError:
pass
from .common import Benchmark
class Build(Benchmark):
params = [
[(3,10000,1000), (8,10000,1000), (16,10000,1000)],
['KDTree', 'cKDTree'],
]
param_names = ['(m, n, r)', 'class']
def setup(self, mnr, cls_name):
self.cls = KDTree if cls_name == 'KDTree' else cKDTree
m, n, r = mnr
np.random.seed(1234)
self.data = np.concatenate((np.random.randn(n//2,m),
np.random.randn(n-n//2,m)+np.ones(m)))
self.queries = np.concatenate((np.random.randn(r//2,m),
np.random.randn(r-r//2,m)+np.ones(m)))
def time_build(self, mnr, cls_name):
"""
Constructing kd-tree
=======================
dim | # points | time
"""
m, n, r = mnr
if cls_name == 'cKDTree_flat':
self.T = self.cls(self.data, leafsize=n)
else:
self.cls(self.data)
LEAF_SIZES = [8, 128]
BOX_SIZES = [None, 1.0]
class Query(Benchmark):
params = [
[(3,10000,1000), (8,10000,1000), (16,10000,1000)],
[1, 2, np.inf],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n, r)', 'p', 'boxsize', 'leafsize']
@staticmethod
def do_setup(self, mnr, p, boxsize, leafsize):
m, n, r = mnr
np.random.seed(1234)
self.data = np.random.uniform(size=(n, m))
self.queries = np.random.uniform(size=(r, m))
self.T = cKDTree(self.data, leafsize=leafsize, boxsize=boxsize)
def setup(self, mnr, p, boxsize, leafsize):
Query.do_setup(self, mnr, p, boxsize, leafsize)
def time_query(self, mnr, p, boxsize, leafsize):
"""
Querying kd-tree
dim | # points | # queries | KDTree | cKDTree | flat cKDTree
"""
self.T.query(self.queries, p=p)
class Radius(Benchmark):
params = [
[(3,10000,1000)],
[1, 2, np.inf],
[0.2, 0.5],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n, r)', 'p', 'probe radius', 'boxsize', 'leafsize']
def __init__(self):
self.time_query_pairs.__func__.params = list(self.params)
self.time_query_pairs.__func__.params[0] = [(3,1000,30),
(8,1000,30),
(16,1000,30)]
def setup(self, mnr, p, probe_radius, boxsize, leafsize):
Query.do_setup(self, mnr, p, boxsize, leafsize)
def time_query_ball_point(self, mnr, p, probe_radius, boxsize, leafsize):
self.T.query_ball_point(self.queries, probe_radius, p=p)
def time_query_pairs(self, mnr, p, probe_radius, boxsize, leafsize):
self.T.query_pairs(probe_radius, p=p)
class Neighbors(Benchmark):
params = [
[(3,1000,1000),
(8,1000,1000),
(16,1000,1000)],
[1, 2, np.inf],
[0.2, 0.5],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n1, n2)', 'p', 'probe radius', 'boxsize', 'leafsize']
def setup(self, mn1n2, p, probe_radius, boxsize, leafsize):
m, n1, n2 = mn1n2
self.data1 = np.random.uniform(size=(n1, m))
self.data2 = np.random.uniform(size=(n2, m))
self.T1 = cKDTree(self.data1, boxsize=boxsize, leafsize=leafsize)
self.T2 = cKDTree(self.data2, boxsize=boxsize, leafsize=leafsize)
def time_sparse_distance_matrix(self, mn1n2, p, probe_radius, boxsize, leafsize):
self.T1.sparse_distance_matrix(self.T2, probe_radius, p=p)
def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize):
"""
Count neighbors kd-tree
dim | # points T1 | # points T2 | p | probe radius | BoxSize | LeafSize
"""
self.T1.count_neighbors(self.T2, probe_radius, p=p)
def generate_spherical_points(num_points):
# generate uniform points on sphere (see:
# http://stackoverflow.com/a/23785326/2942522)
    np.random.seed(123)
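    # The standard trick referenced above: normalize i.i.d. Gaussian
    # samples (the radius-1 sphere and the (N, 3) return shape are
    # assumptions here, not from the original):
    points = np.random.randn(num_points, 3)
    points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
    return points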
"""
Computes different GAN metrics for a generator.
"""
import os
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
from torch_mimicry.metrics import compute_fid, compute_is, compute_kid
from torch_mimicry.utils import common
from torchvision import utils as vutils
from .distributed import *
from .pr_score import pr_score
from .fid_score import fid_score
from .compute_fid_with_index import fid_score_with_index
from .compute_fid_with_attr import fid_score_with_attr
from .pr_score_with_attr import partial_recall_score_with_attr
def sigmoid(x):
return 1 / (1 + np.exp(-x))
class DRS(nn.Module):
def __init__(self, netG, netD, device, batch_size=256):
super().__init__()
self.netG = netG
self.netD = netD
self.maximum = -100000
self.device = device
self.batch_size = batch_size
self.init_drs()
def get_fake_samples_and_ldr(self, num_data):
with torch.no_grad():
imgs = self.netG.generate_images(num_data, device=self.device)
netD_out = self.netD(imgs)
if type(netD_out) is tuple:
netD_out = netD_out[0]
ldr = netD_out.detach().cpu().numpy()
return imgs, ldr
def init_drs(self):
for i in range(50):
_, ldr = self.get_fake_samples_and_ldr(self.batch_size)
tmp_max = ldr.max()
if self.maximum < tmp_max:
self.maximum = tmp_max
def sub_rejection_sampler(self, fake_samples, ldr, eps=1e-6, gamma=None):
tmp_max = ldr.max()
if tmp_max > self.maximum:
self.maximum = tmp_max
ldr_max = ldr - self.maximum
F = ldr_max - np.log(1- np.exp(ldr_max-eps))
if gamma is None:
gamma = np.percentile(F, 80)
F = F - gamma
sigF = sigmoid(F)
psi = np.random.rand(len(sigF))
fake_x_sampled = [fake_samples[i].detach().cpu().numpy() for i in range(len(sigF)) if sigF[i] > psi[i]]
return torch.Tensor(fake_x_sampled)
def generate_images(self, num_images, device=None):
fake_samples_list = []
num_sampled = 0
if device is None:
device = self.device
while num_sampled < num_images:
fake_samples, ldrs = self.get_fake_samples_and_ldr(self.batch_size)
fake_samples_accepted = self.sub_rejection_sampler(fake_samples, ldrs)
fake_samples_list.append(fake_samples_accepted)
num_sampled += fake_samples_accepted.size(0)
fake_samples_all = torch.cat(fake_samples_list, dim=0)
return fake_samples_all[:num_images].to(device)
def visualize_images(self, log_dir, evaluate_step, num_images = 64):
img_dir = os.path.join(log_dir, 'images')
fake_samples = self.generate_images(num_images)
images_viz = vutils.make_grid(fake_samples,
padding=2,
normalize=True)
vutils.save_image(images_viz,
'{}/fake_samples_step_{}_after_drs.png'.format(img_dir, evaluate_step),
normalize=True)
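# A minimal usage sketch for the DRS wrapper above (the helper name and the
# sample count are arbitrary; netG/netD must expose the generate_images()
# and forward() interfaces that DRS assumes):
def _drs_demo(trained_G, trained_D, device):
    drs_gen = DRS(netG=trained_G, netD=trained_D, device=device)
    return drs_gen.generate_images(num_images=100)  # accepted samples only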
def evaluate_drs(
metric,
netG,
netD_drs,
log_dir,
evaluate_range=None,
evaluate_step=None,
use_original_netD=False,
num_runs=1,
start_seed=0,
overwrite=False,
write_to_json=True,
device=None,
is_stylegan2=False,
**kwargs):
"""
Evaluates a generator over several runs.
Args:
metric (str): The name of the metric for evaluation.
netG (Module): Torch generator model to evaluate.
log_dir (str): The path to the log directory.
evaluate_range (tuple): The 3 valued tuple for defining a for loop.
evaluate_step (int): The specific checkpoint to load. Used in place of evaluate_range.
device (str): Device identifier to use for computation.
num_runs (int): The number of runs to compute FID for each checkpoint.
start_seed (int): Starting random seed to use.
write_to_json (bool): If True, writes to an output json file in log_dir.
overwrite (bool): If True, then overwrites previous metric score.
Returns:
None
"""
# Check evaluation range/steps
if evaluate_range and evaluate_step or not (evaluate_step
or evaluate_range):
raise ValueError(
"Only one of evaluate_step or evaluate_range can be defined.")
if evaluate_range:
if (type(evaluate_range) != tuple
or not all(map(lambda x: type(x) == int, evaluate_range))
or not len(evaluate_range) == 3):
raise ValueError(
"evaluate_range must be a tuple of ints (start, end, step).")
output_log_dir = log_dir / 'evaluate' / f'step-{evaluate_step}'
output_log_dir.mkdir(parents=True, exist_ok=True)
# Check metric arguments
if metric == 'kid':
if 'num_samples' not in kwargs:
raise ValueError(
"num_samples must be provided for KID computation.")
output_file = os.path.join(
output_log_dir, 'kid_{}k.json'.format(kwargs['num_samples'] // 1000))
elif metric == 'fid':
if 'num_real_samples' not in kwargs or 'num_fake_samples' not in kwargs:
raise ValueError(
"num_real_samples and num_fake_samples must be provided for FID computation."
)
output_file = os.path.join(
output_log_dir,
'fid_{}k_{}k.json'.format(kwargs['num_real_samples'] // 1000,
kwargs['num_fake_samples'] // 1000))
elif metric == 'inception_score':
if 'num_samples' not in kwargs:
raise ValueError(
"num_samples must be provided for IS computation.")
output_file = os.path.join(
output_log_dir,
'inception_score_{}k.json'.format(kwargs['num_samples'] // 1000))
elif metric == 'pr':
if 'num_real_samples' not in kwargs or 'num_fake_samples' not in kwargs:
raise ValueError(
"num_real_samples and num_fake_samples must be provided for PR computation."
)
output_file = os.path.join(
output_log_dir,
'pr_{}k_{}k.json'.format(kwargs['num_real_samples'] // 1000,
kwargs['num_fake_samples'] // 1000))
else:
choices = ['fid', 'kid', 'inception_score', 'pr']
raise ValueError("Invalid metric {} selected. Choose from {}.".format(
metric, choices))
# Check checkpoint dir
netG_ckpt_dir = os.path.join(log_dir, 'checkpoints', 'netG')
if use_original_netD:
ckpt_path = 'netD'
else:
ckpt_path = 'netD_drs'
netD_drs_ckpt_dir = os.path.join(log_dir, 'checkpoints', ckpt_path)
if not os.path.exists(netG_ckpt_dir):
raise ValueError(
"Checkpoint directory {} cannot be found in log_dir.".format(
netG_ckpt_dir))
# Check device
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Setup output file
if os.path.exists(output_file):
scores_dict = common.load_from_json(output_file)
scores_dict = dict([(int(k), v) for k, v in scores_dict.items()])
else:
scores_dict = {}
# Decide naming convention
names_dict = {
'fid': 'FID',
'inception_score': 'Inception Score',
'kid': 'KID',
'pr': 'PR',
}
# Evaluate across a range
start, end, interval = evaluate_range or (evaluate_step, evaluate_step,
evaluate_step)
for step in range(start, end + 1, interval):
# Skip computed scores
# if step in scores_dict and write_to_json and not overwrite:
# print("INFO: {} at step {} has been computed. Skipping...".format(
# names_dict[metric], step))
# continue
# Load and restore the model checkpoint
netG_ckpt_file = os.path.join(netG_ckpt_dir, 'netG_{}_steps.pth'.format(step))
netD_drs_ckpt_file = os.path.join(netD_drs_ckpt_dir, f'{ckpt_path}_{step}_steps.pth')
if not os.path.exists(netG_ckpt_file):
print("INFO: Checkpoint at step {} does not exist. Skipping...".
format(step))
continue
netG.restore_checkpoint(ckpt_file=netG_ckpt_file, optimizer=None)
if is_stylegan2:
ckpt = torch.load(netG_ckpt_file, map_location=lambda storage, loc: storage)
netD_drs.load_state_dict(ckpt["drs_d"] if "drs_d" in ckpt else ckpt["d"])
else:
netD_drs.restore_checkpoint(ckpt_file=netD_drs_ckpt_file, optimizer=None)
netG = DRS(netG=netG, netD=netD_drs, device=device)
#Visualize images after DRS
netG.visualize_images(log_dir = log_dir, evaluate_step = evaluate_step)
# Compute score for each seed
scores = []
if metric == 'pr':
scores = defaultdict(list)
for seed in range(start_seed, start_seed + num_runs):
print("INFO: Computing {} in memory...".format(names_dict[metric]))
# Obtain only the raw score without var
if metric == "fid":
score = compute_fid.fid_score(netG=netG,
seed=seed,
device=device,
log_dir=log_dir,
**kwargs)
elif metric == "inception_score":
score, _ = compute_is.inception_score(netG=netG,
seed=seed,
device=device,
log_dir=log_dir,
**kwargs)
elif metric == "kid":
score, _ = compute_kid.kid_score(netG=netG,
device=device,
seed=seed,
log_dir=log_dir,
**kwargs)
elif metric == "pr":
score = pr_score(netG=netG,
seed=seed,
device=device,
log_dir=log_dir,
**kwargs)
if metric == "pr":
for key in score:
scores[key].append(score[key])
print("INFO: {} (step {}) [seed {}]: {}".format(
key, step, seed, score[key]))
else:
scores.append(score)
print("INFO: {} (step {}) [seed {}]: {}".format(
names_dict[metric], step, seed, score))
scores_dict[step] = scores
# Save scores every step
if write_to_json:
common.write_to_json(scores_dict, output_file)
# Print the scores in order
for step in range(start, end + 1, interval):
if step in scores_dict:
if metric == "pr":
for key in scores_dict[step]:
scores = scores_dict[step][key]
                mean = np.mean(scores)
# See "d_bankfull" in update_flow_depth() ######## (2/21/13)
# NB! update_diversion() is currently COMMENTED OUT.
# See "(5/13/10)" for a temporary fix.
#------------------------------------------------------------------------
# Copyright (c) 2001-2020, <NAME>
#
# Apr 2020. Added set_new_defaults(), disable_all_output().
# Oct 2019. Added FLOOD_OPTION and CHECK_STABILITY flags.
# Sep 2014. Wrote new update_diversions().
# New standard names and BMI updates and testing.
# Nov 2013. Converted TopoFlow to a Python package.
# Feb 2013. Adapted to use EMELI framework.
# Jan 2013. Shared scalar doubles are now 0D numpy arrays.
# This makes them mutable and allows components with
# a reference to them to see them change.
# So far: Q_outlet, Q_peak, Q_min...
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2012. Commented out diversions.update() for now. #######
# May 2012. Shared scalar doubles are now 1-element 1D numpy arrays.
# This makes them mutable and allows components with
# a reference to them to see them change.
# So far: Q_outlet, Q_peak, Q_min...
# May 2010. Changes to initialize() and read_cfg_file()
# Mar 2010. Changed codes to code, widths to width,
# angles to angle, nvals to nval, z0vals to z0val,
# slopes to slope (for GUI tools and consistency
# across all process components)
# Aug 2009. Updates.
# Jul 2009. Updates.
# May 2009. Updates.
# Jan 2009. Converted from IDL.
#-----------------------------------------------------------------------
# NB! In the CFG file, change MANNING and LAW_OF_WALL flags to
# a single string entry like "friction method". #########
#-----------------------------------------------------------------------
# Notes: Set self.u in manning and law_of_wall functions ??
# Update friction factor in manning() and law_of_wall() ?
# Double check how Rh is used in law_of_the_wall().
# d8_flow has "flow_grids", but this one has "codes".
# Make sure values are not stored twice.
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# NOTES: This file defines a "base class" for channelized flow
# components as well as functions used by most or
# all channel flow methods. The methods of this class
# (especially "update_velocity") should be over-ridden as
# necessary for different methods of modeling channelized
# flow. See channels_kinematic_wave.py,
# channels_diffusive_wave.py and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
# NOTES: update_free_surface_slope() is called by the
# update_velocity() methods of channels_diffusive_wave.py
# and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
#
# class channels_component
#
# ## get_attribute() # (defined in each channel component)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/15/12)
# get_var_units() # (5/15/12)
#-----------------------------
# set_constants()
# set_missing_cfg_options() # (4/29/20)
# initialize()
# update()
# finalize()
# set_computed_input_vars() # (5/11/10)
#----------------------------------
# initialize_d8_vars() ########
# initialize_computed_vars()
# initialize_diversion_vars() # (9/22/14)
# initialize_outlet_values()
# initialize_peak_values()
# initialize_min_and_max_values() # (2/3/13)
#-------------------------------------
# update_flood_d8_vars() # (9/17/19, for flooding)
# update_R()
# update_R_integral()
# update_discharge()
# update_flood_discharge() # (9/20/19)
# update_diversions() # (9/22/14)
# update_flow_volume()
# update_flood_volume() # (9/20/19)
# update_flow_depth_LAST()
# update_flow_depth() # (9/16/19, update)
# update_flood_depth() # (9/20/19)
# update_free_surface_slope()
# update_shear_stress() # (9/9/14, depth-slope product)
# update_shear_speed() # (9/9/14)
# update_trapezoid_Rh()
# update_friction_factor() # (9/9/14)
#----------------------------------
# update_velocity() # (override as needed)
# update_velocity_on_edges()
# update_froude_number() # (9/9/14)
#----------------------------------
# update_outlet_values()
# update_peak_values() # (at the main outlet)
# update_Q_out_integral() # (moved here from basins.py)
# update_mins_and_maxes() # (don't add into update())
# update_total_channel_water_volume() # (9/17/19)
# update_total_land_water_volume() # (9/17/19)
# check_flow_depth()
# check_flow_velocity()
#----------------------------------
# open_input_files()
# read_input_files()
# close_input_files()
#----------------------------------
# update_outfile_names()
# bundle_output_files() # (9/21/14. Not used yet)
# disable_all_output() # (04/29/20)
# open_output_files()
# write_output_files()
# close_output_files()
# save_grids()
# save_pixel_values()
#----------------------------------
# manning_formula()
# law_of_the_wall()
# print_status_report()
# remove_bad_slopes()
# Functions: # (stand-alone versions of these)
# Trapezoid_Rh()
# Manning_Formula()
# Law_of_the_Wall()
#-----------------------------------------------------------------------
import numpy as np
import os, os.path
import copy
from topoflow.utils import BMI_base
from topoflow.utils import file_utils ###
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import ncgs_files ###
from topoflow.utils import ncts_files ###
from topoflow.utils import rtg_files ###
from topoflow.utils import text_ts_files ###
from topoflow.utils import tf_utils
#-------------------------------------------------------
# NOTE: Do not import "d8_base" itself, it won't work
#-------------------------------------------------------
from topoflow.components import d8_global as d8_base # (11/11/16)
## from topoflow.utils import tf_d8_base as d8_base
#-----------------------------------------------------------------------
class channels_component( BMI_base.BMI_component ):
#-----------------------------------------------------------
# Note: rainfall_volume_flux *must* be liquid-only precip.
#-----------------------------------------------------------
_input_var_names = [
'atmosphere_water__rainfall_volume_flux', # (P_rain)
'glacier_ice__melt_volume_flux', # (MR)
'land_surface_water__baseflow_volume_flux', # (GW)
'land_surface_water__evaporation_volume_flux', # (ET)
'soil_surface_water__infiltration_volume_flux', # (IN)
'snowpack__melt_volume_flux', # (SM)
'water-liquid__mass-per-volume_density' ] # (rho_H2O)
#------------------------------------------------------------------
# 'canals__count', # n_canals
# 'canals_entrance__x_coordinate', # canals_in_x
# 'canals_entrance__y_coordinate', # canals_in_y
# 'canals_entrance_water__volume_fraction', # Q_canals_fraction
# 'canals_exit__x_coordinate', # canals_out_x
# 'canals_exit__y_coordinate', # canals_out_y
# 'canals_exit_water__volume_flow_rate', # Q_canals_out
# 'sinks__count', # n_sinks
# 'sinks__x_coordinate', # sinks_x
# 'sinks__y_coordinate', # sinks_y
# 'sinks_water__volume_flow_rate', # Q_sinks
# 'sources__count', # n_sources
# 'sources__x_coordinate', # sources_x
# 'sources__y_coordinate', # sources_y
# 'sources_water__volume_flow_rate' ] # Q_sources
#----------------------------------
# Maybe add these out_vars later.
#----------------------------------
# ['time_sec', 'time_min' ]
_output_var_names = [
'basin_outlet_water_flow__half_of_fanning_friction_factor', # f_outlet
'basin_outlet_water_x-section__mean_depth', # d_outlet
'basin_outlet_water_x-section__peak_time_of_depth', # Td_peak
'basin_outlet_water_x-section__peak_time_of_volume_flow_rate', # T_peak
'basin_outlet_water_x-section__peak_time_of_volume_flux', # Tu_peak
'basin_outlet_water_x-section__time_integral_of_volume_flow_rate', # vol_Q
'basin_outlet_water_x-section__time_max_of_mean_depth', # d_peak
'basin_outlet_water_x-section__time_max_of_volume_flow_rate', # Q_peak
'basin_outlet_water_x-section__time_max_of_volume_flux', # u_peak
'basin_outlet_water_x-section__volume_flow_rate', # Q_outlet
'basin_outlet_water_x-section__volume_flux', # u_outlet
#--------------------------------------------------
'canals_entrance_water__volume_flow_rate', # Q_canals_in
#--------------------------------------------------
'channel_bottom_surface__slope', # S_bed
'channel_bottom_water_flow__domain_max_of_log_law_roughness_length', # z0val_max
'channel_bottom_water_flow__domain_min_of_log_law_roughness_length', # z0val_min
'channel_bottom_water_flow__log_law_roughness_length', # z0val
'channel_bottom_water_flow__magnitude_of_shear_stress', # tau
'channel_bottom_water_flow__shear_speed', # u_star
'channel_centerline__sinuosity', # sinu
'channel_water__volume', # vol
'channel_water_flow__froude_number', # froude
'channel_water_flow__half_of_fanning_friction_factor', # f
'channel_water_flow__domain_max_of_manning_n_parameter', # nval_max
'channel_water_flow__domain_min_of_manning_n_parameter', # nval_min
'channel_water_flow__manning_n_parameter', # nval
'channel_water_surface__slope', # S_free
#---------------------------------------------------
# These might only be available at the end of run.
#---------------------------------------------------
'channel_water_x-section__domain_max_of_mean_depth', # d_max
'channel_water_x-section__domain_min_of_mean_depth', # d_min
'channel_water_x-section__domain_max_of_volume_flow_rate', # Q_max
'channel_water_x-section__domain_min_of_volume_flow_rate', # Q_min
'channel_water_x-section__domain_max_of_volume_flux', # u_max
'channel_water_x-section__domain_min_of_volume_flux', # u_min
#---------------------------------------------------------------------
'channel_water_x-section__hydraulic_radius', # Rh
'channel_water_x-section__initial_mean_depth', # d0
'channel_water_x-section__mean_depth', # d
'channel_water_x-section__volume_flow_rate', # Q
'channel_water_x-section__volume_flux', # u
'channel_water_x-section__wetted_area', # A_wet
'channel_water_x-section__wetted_perimeter', # P_wet
####### Next one added for flooding: 2019-09-16. ########
'channel_water_x-section_top__width', # w_top
'channel_x-section_trapezoid_bottom__width', # width
'channel_x-section_trapezoid_side__flare_angle', # angle
####### Next one added for flooding: 2019-09-16. ########
'land_surface_water__depth', # df
'land_surface_water__runoff_volume_flux', # R
'land_surface_water__domain_time_integral_of_runoff_volume_flux', # vol_R
'model__time_step', # dt
'model_grid_cell__area', # da
#---------------------------------------------------------------------
'network_channel_water__volume', # vol_chan
'land_surface_water__area_integral_of_depth' ] # vol_land
################################################
# These come from input files, not from other components
_config_var_names = [
'channel_bottom_water_flow__log_law_roughness_length', # z0val
'channel_centerline__sinuosity', # sinu
'channel_water_flow__manning_n_parameter', # nval
'channel_water_x-section__bankfull_depth', # d_bankfull, NEW
'channel_water_x-section__bankfull_width', # w_bankfull, NEW
'channel_water_x-section__initial_mean_depth', # d0
# 'channel_water_x-section_top__width', # w_top
'channel_x-section_trapezoid_bottom__width', # width
'channel_x-section_trapezoid_side__flare_angle', # angle
# Next two vars can be obtained from d8 component.
# 'land_surface__elevation', # DEM
# 'land_surface__slope', # S_bed
'land_surface_water__depth' ] # df
_var_name_map = {
'atmosphere_water__rainfall_volume_flux': 'P_rain',
'glacier_ice__melt_volume_flux': 'MR',
# 'land_surface__elevation': 'DEM',
# 'land_surface__slope': 'S_bed',
'land_surface_water__baseflow_volume_flux': 'GW',
'land_surface_water__evaporation_volume_flux': 'ET',
'soil_surface_water__infiltration_volume_flux': 'IN',
'snowpack__melt_volume_flux': 'SM',
'water-liquid__mass-per-volume_density': 'rho_H2O',
#------------------------------------------------------------------------
'basin_outlet_water_flow__half_of_fanning_friction_factor':'f_outlet',
'basin_outlet_water_x-section__mean_depth': 'd_outlet',
'basin_outlet_water_x-section__peak_time_of_depth': 'Td_peak',
'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'T_peak',
'basin_outlet_water_x-section__peak_time_of_volume_flux': 'Tu_peak',
'basin_outlet_water_x-section__volume_flow_rate': 'Q_outlet',
'basin_outlet_water_x-section__volume_flux': 'u_outlet',
'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'vol_Q',
'basin_outlet_water_x-section__time_max_of_mean_depth': 'd_peak',
'basin_outlet_water_x-section__time_max_of_volume_flow_rate':'Q_peak',
'basin_outlet_water_x-section__time_max_of_volume_flux': 'u_peak',
#--------------------------------------------------------------------------
'canals_entrance_water__volume_flow_rate': 'Q_canals_in',
#--------------------------------------------------------------------------
'channel_bottom_surface__slope': 'S_bed',
'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'z0val_max',
'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'z0val_min',
'channel_bottom_water_flow__log_law_roughness_length': 'z0val',
'channel_bottom_water_flow__magnitude_of_shear_stress': 'tau',
'channel_bottom_water_flow__shear_speed': 'u_star',
'channel_centerline__sinuosity': 'sinu',
'channel_water__volume': 'vol',
'channel_water_flow__domain_max_of_manning_n_parameter': 'nval_max',
'channel_water_flow__domain_min_of_manning_n_parameter': 'nval_min',
'channel_water_flow__froude_number': 'froude',
'channel_water_flow__half_of_fanning_friction_factor': 'f',
'channel_water_flow__manning_n_parameter': 'nval',
'channel_water_surface__slope': 'S_free',
#-----------------------------------------------------------------------
'channel_water_x-section__domain_max_of_mean_depth': 'd_max',
'channel_water_x-section__domain_min_of_mean_depth': 'd_min',
'channel_water_x-section__domain_max_of_volume_flow_rate': 'Q_max',
'channel_water_x-section__domain_min_of_volume_flow_rate': 'Q_min',
'channel_water_x-section__domain_max_of_volume_flux': 'u_max',
'channel_water_x-section__domain_min_of_volume_flux': 'u_min',
#-----------------------------------------------------------------------
'channel_water_x-section__hydraulic_radius': 'Rh',
'channel_water_x-section__initial_mean_depth': 'd0',
'channel_water_x-section__mean_depth': 'd',
'channel_water_x-section__volume_flow_rate': 'Q',
'channel_water_x-section__volume_flux': 'u',
'channel_water_x-section__wetted_area': 'A_wet',
'channel_water_x-section__wetted_perimeter': 'P_wet',
## 'channel_water_x-section_top__width': # (not used)
'channel_x-section_trapezoid_bottom__width': 'width', ####
'channel_x-section_trapezoid_side__flare_angle': 'angle', ####
'land_surface_water__depth': 'df',
'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'vol_R',
'land_surface_water__runoff_volume_flux': 'R',
'model__time_step': 'dt',
'model_grid_cell__area': 'da',
#------------------------------------------------------------------
'canals__count': 'n_canals',
'canals_entrance__x_coordinate': 'canals_in_x',
'canals_entrance__y_coordinate': 'canals_in_y',
'canals_entrance_water__volume_fraction': 'Q_canals_fraction',
'canals_exit__x_coordinate': 'canals_out_x',
'canals_exit__y_coordinate': 'canals_out_y',
'canals_exit_water__volume_flow_rate': 'Q_canals_out',
'sinks__count': 'n_sinks',
'sinks__x_coordinate': 'sinks_x',
'sinks__y_coordinate': 'sinks_y',
'sinks_water__volume_flow_rate': 'Q_sinks',
'sources__count': 'n_sources',
'sources__x_coordinate': 'sources_x',
'sources__y_coordinate': 'sources_y',
'sources_water__volume_flow_rate': 'Q_sources',
#------------------------------------------------------------------
'network_channel_water__volume': 'vol_chan',
'land_surface_water__area_integral_of_depth': 'vol_land' }
#####################################
_var_units_map = {
'atmosphere_water__rainfall_volume_flux': 'm s-1',
'glacier_ice__melt_volume_flux': 'm s-1',
## 'land_surface__elevation': 'm',
## 'land_surface__slope': '1',
'land_surface_water__baseflow_volume_flux': 'm s-1',
'land_surface_water__evaporation_volume_flux': 'm s-1',
'soil_surface_water__infiltration_volume_flux': 'm s-1',
'snowpack__melt_volume_flux': 'm s-1',
'water-liquid__mass-per-volume_density': 'kg m-3',
#---------------------------------------------------------------------------
'basin_outlet_water_flow__half_of_fanning_friction_factor': '1',
'basin_outlet_water_x-section__mean_depth': 'm',
'basin_outlet_water_x-section__peak_time_of_depth': 'min',
'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'min',
'basin_outlet_water_x-section__peak_time_of_volume_flux': 'min',
'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'm3',
'basin_outlet_water_x-section__time_max_of_mean_depth': 'm',
'basin_outlet_water_x-section__time_max_of_volume_flow_rate': 'm3 s-1',
'basin_outlet_water_x-section__time_max_of_volume_flux': 'm s-1',
'basin_outlet_water_x-section__volume_flow_rate': 'm3 s-1',
'basin_outlet_water_x-section__volume_flux': 'm s-1',
#---------------------------------------------------------------------------
'canals_entrance_water__volume_flow_rate': 'm3 s-1',
#---------------------------------------------------------------------------
'channel_bottom_surface__slope': '1',
'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'm',
'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'm',
'channel_bottom_water_flow__log_law_roughness_length': 'm',
'channel_bottom_water_flow__magnitude_of_shear_stress': 'kg m-1 s-2',
'channel_bottom_water_flow__shear_speed': 'm s-1',
'channel_centerline__sinuosity': '1',
'channel_water__volume': 'm3',
'channel_water_flow__froude_number': '1',
'channel_water_flow__half_of_fanning_friction_factor': '1',
'channel_water_flow__manning_n_parameter': 'm-1/3 s',
'channel_water_flow__domain_max_of_manning_n_parameter': 'm-1/3 s',
'channel_water_flow__domain_min_of_manning_n_parameter': 'm-1/3 s',
'channel_water_surface__slope': '1',
#--------------------------------------------------------------------
'channel_water_x-section__domain_max_of_mean_depth': 'm',
'channel_water_x-section__domain_min_of_mean_depth': 'm',
'channel_water_x-section__domain_max_of_volume_flow_rate': 'm3 s-1',
'channel_water_x-section__domain_min_of_volume_flow_rate': 'm3 s-1',
'channel_water_x-section__domain_max_of_volume_flux': 'm s-1',
'channel_water_x-section__domain_min_of_volume_flux': 'm s-1',
#--------------------------------------------------------------------
'channel_water_x-section__hydraulic_radius': 'm',
'channel_water_x-section__initial_mean_depth': 'm',
'channel_water_x-section__mean_depth': 'm',
'channel_water_x-section__volume_flow_rate': 'm3 s-1',
'channel_water_x-section__volume_flux': 'm s-1',
'channel_water_x-section__wetted_area': 'm2',
'channel_water_x-section__wetted_perimeter': 'm',
'channel_x-section_trapezoid_bottom__width': 'm',
'channel_x-section_trapezoid_side__flare_angle': 'rad', # CHECKED
'land_surface_water__depth': 'm',
'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'm3',
'land_surface_water__runoff_volume_flux': 'm s-1',
'model__time_step': 's',
'model_grid_cell__area': 'm2',
#------------------------------------------------------------------
'canals__count': '1',
'canals_entrance__x_coordinate': 'm',
'canals_entrance__y_coordinate': 'm',
'canals_entrance_water__volume_fraction': '1',
'canals_exit__x_coordinate': 'm',
'canals_exit__y_coordinate': 'm',
'canals_exit_water__volume_flow_rate': 'm3 s-1',
'sinks__count': '1',
'sinks__x_coordinate': 'm',
'sinks__y_coordinate': 'm',
'sinks_water__volume_flow_rate': 'm3 s-1',
'sources__count': '1',
'sources__x_coordinate': 'm',
'sources__y_coordinate': 'm',
'sources_water__volume_flow_rate': 'm3 s-1',
#------------------------------------------------------------
'network_channel_water__volume': 'm3',
'land_surface_water__area_integral_of_depth': 'm3' }
#####################################
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_config_var_names(self):
# New, proposed BMI function
return self._config_var_names
# get_config_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
## def get_var_type(self, long_var_name):
##
## #---------------------------------------
## # So far, all vars have type "double",
## # but use the one in BMI_base instead.
## #---------------------------------------
## return 'float64'
##
## # get_var_type()
#-------------------------------------------------------------------
def set_constants(self):
#------------------------
# Define some constants
#------------------------
self.g = np.float64(9.81) # (gravitation const.)
self.aval = np.float64(0.476) # (integration const.)
self.kappa = np.float64(0.408) # (von Karman's const.)
self.law_const = np.sqrt(self.g) / self.kappa
self.one_third = np.float64(1.0) / 3.0
self.two_thirds = np.float64(2.0) / 3.0
self.deg_to_rad = np.pi / 180.0
self.rad_to_deg = 180.0 / np.pi
# set_constants()
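    #-------------------------------------------------------------------
    # Illustrative sketch only (not one of this component's methods): a
    # condensed form of the Manning relation that manning_formula()
    # implements later in this file, u = Rh^(2/3) * S^(1/2) / n, reusing
    # the two_thirds constant set above.
    #-------------------------------------------------------------------
    def _manning_velocity_sketch(self, Rh, S, n):

        return (Rh ** self.two_thirds) * np.sqrt(S) / n

    #   _manning_velocity_sketch()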
#-------------------------------------------------------------------
def set_missing_cfg_options(self):
#------------------------------------------------------
# (2019-10-08) Added CHECK_STABILITY flag to CFG file
# so stability check be turned off to increase speed.
#------------------------------------------------------
if not(hasattr(self, 'CHECK_STABILITY')):
self.CHECK_STABILITY = True
#--------------------------------------------------------------
# (2019-10-03) Added FLOOD_OPTION flag to CFG file.
# If not(FLOOD_OPTION), don't write flood depths (all zeros).
#--------------------------------------------------------------
if not(hasattr(self, 'FLOOD_OPTION')):
self.FLOOD_OPTION = False
self.SAVE_DF_GRIDS = False
self.SAVE_DF_PIXELS = False
#---------------------------------------------
# Also new in 2019, not in older CFG files
# Not used then, but still need to be set.
# Need to be set if FLOOD_OPTION is False ??
#---------------------------------------------
if not(hasattr(self, 'd_bankfull_type')):
self.d_bankfull_type = 'Scalar' # or Grid
self.d_bankfull = 10.0 # [meters]
self.d_bankfull_file = ''
# set_missing_cfg_options()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver", SILENT=False):
if not(SILENT):
print(' ')
print('Channels component: Initializing...')
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants() # (12/7/09)
# print 'CHANNELS calling initialize_config_vars()...'
self.initialize_config_vars()
self.set_missing_cfg_options() # (2020-04-29)
# New option, see set_new_defaults().
if not(self.FLOOD_OPTION):
self.SAVE_DF_GRIDS = False # (still needed here)
self.SAVE_DF_PIXELS = False
self.d_flood_gs_file = ''
self.d_flood_ts_file = ''
#------------------------------------------------------------
# Must call read_grid_info() after initialize_config_vars()
#------------------------------------------------------------
# print 'CHANNELS calling read_grid_info()...'
#### self.read_grid_info() # NOW IN initialize_config_vars()
#------------------------------------------------------------
#print 'CHANNELS calling initialize_basin_vars()...'
self.initialize_basin_vars() # (5/14/10)
#-----------------------------------------
# This must come before "Disabled" test.
#-----------------------------------------
# print 'CHANNELS calling initialize_time_vars()...'
self.initialize_time_vars()
#----------------------------------
# Has component been turned off ?
#----------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print('Channels component: Disabled in CFG file.')
self.disable_all_output() # (04/29/2020)
self.DONE = True
self.status = 'initialized' # (OpenMI 2.0 convention)
return
## print '################################################'
## print 'min(d0), max(d0) =', self.d0.min(), self.d0.max()
## print '################################################'
#--------------------------------------------------------
# Since only Grid type is allowed, these are not set in
# the CFG file, but need to be defined for next part.
#--------------------------------------------------------
self.code_type = 'Grid' # (may not need this one)
self.slope_type = 'Grid'
##################################################################
# Move this block into new: "initialize_input_file_vars()" ???
#---------------------------------------------------
# Initialize vars to be read from files (11/16/16)
#---------------------------------------------------
# Need this in order to use "update_var()".
#----------------------------------------------------------
# NOTE: read_config_file() sets these to '0.0' if they
# are not type "Scalar", so self has the attribute.
#----------------------------------------------------------
if (self.slope_type.lower() != 'scalar'):
self.slope = self.initialize_var(self.slope_type, dtype='float64')
if (self.width_type.lower() != 'scalar'):
self.width = self.initialize_var(self.width_type, dtype='float64')
if (self.angle_type.lower() != 'scalar'):
self.angle = self.initialize_var(self.angle_type, dtype='float64')
if (self.sinu_type.lower() != 'scalar'):
self.sinu = self.initialize_var(self.sinu_type, dtype='float64')
if (self.d0_type.lower() != 'scalar'):
self.d0 = self.initialize_var(self.d0_type, dtype='float64')
#--------------------------------------------------------------------------------
if (self.d_bankfull_type.lower() != 'scalar'):
self.d_bankfull = self.initialize_var(self.d_bankfull_type, dtype='float64')
# if (self.w_bankfull_type.lower() != 'scalar'):
# self.w_bankfull = self.initialize_var(self.w_bankfull_type, dtype='float64')
#--------------------------------------------------------------------------------
if (self.MANNING):
if (self.nval_type.lower() != 'scalar'):
self.nval = self.initialize_var(self.nval_type, dtype='float64')
if (self.LAW_OF_WALL):
if (self.z0val_type.lower() != 'scalar'):
self.z0val = self.initialize_var(self.z0val_type, dtype='float64')
#------------------------------------------------------
# Must now do this before read_input_files (11/11/16)
#------------------------------------------------------
print('CHANNELS calling initialize_d8_vars()...')
self.initialize_d8_vars() # (depend on D8 flow grid)
#---------------------------------------------
# Open input files needed to initialize vars
#---------------------------------------------
# Can't move read_input_files() to start of
# update(), since initial values needed here.
#---------------------------------------------
# print 'CHANNELS calling open_input_files()...'
self.open_input_files()
print('CHANNELS calling read_input_files()...')
self.read_input_files()
#--------------------------------------------
# Set any input variables that are computed
#--------------------------------------------------
# NOTE: Must be called AFTER read_input_files().
#--------------------------------------------------
print('CHANNELS calling set_computed_input_vars()...')
self.set_computed_input_vars()
#-----------------------
# Initialize variables
#-----------------------
## print 'CHANNELS calling initialize_d8_vars()...'
## self.initialize_d8_vars() # (depend on D8 flow grid)
print('CHANNELS calling initialize_computed_vars()...')
self.initialize_computed_vars()
#--------------------------------------------------
# (5/12/10) I think this is obsolete now.
#--------------------------------------------------
# Make sure self.Q_ts_file is not NULL (12/22/05)
# This is only output file that is set by default
# and is still NULL if user hasn't opened the
# output var dialog for the channel process.
#--------------------------------------------------
## if (self.SAVE_Q_PIXELS and (self.Q_ts_file == '')):
## self.Q_ts_file = (self.case_prefix + '_0D-Q.txt')
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
def update(self, dt=-1.0):
## DEBUG = True
DEBUG = False
#---------------------------------------------
# Note that u and d from previous time step
# must be used on RHS of the equations here.
#---------------------------------------------
self.status = 'updating' # (OpenMI 2.0 convention)
#-------------------------------------------------------
# There may be times where we want to call this method
# even if component is not the driver. But note that
# the TopoFlow driver also makes this same call.
#-------------------------------------------------------
if (self.mode == 'driver'):
self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
### interval=0.5) # [seconds]
# For testing (5/19/12)
# self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s] CHANNEL')
#-------------------------------------------
# Read from files as needed to update vars
#-----------------------------------------------------
# NB! This is currently not needed for the "channel
# process" because values don't change over time and
# read_input_files() is called by initialize().
# NB! read_input_files() is called in initialize().
#-----------------------------------------------------
# if (self.time_index > 0):
# if (DEBUG): print('#### Calling read_input_files()...')
# self.read_input_files()
#-------------------------
# Update computed values
#-------------------------
if (self.FLOOD_OPTION):
if (DEBUG): print('#### Calling update_d8_vars()...')
self.update_flood_d8_vars() ############ (2019-09-17)
#------------------------------------------------------------
if (DEBUG): print('#### Calling update_R()...')
self.update_R()
if (DEBUG): print('#### Calling update_R_integral()...')
self.update_R_integral()
if (DEBUG): print('#### Calling update_channel_discharge()...')
self.update_channel_discharge()
#------------------------------------------------------------
if (self.FLOOD_OPTION):
if (DEBUG): print('#### Calling update_flood_discharge()...')
self.update_flood_discharge() ############ (2019-09-20)
if (DEBUG): print('#### Calling update_discharge()...')
self.update_discharge()
if (DEBUG): print('#### Calling update_diversions()...')
self.update_diversions()
if (DEBUG): print('#### Calling update_flow_volume()...')
self.update_flow_volume()
#------------------------------------------------------------
if (self.FLOOD_OPTION):
if (DEBUG): print('#### Calling update_flood_volume()...')
self.update_flood_volume() ############ (2019-09-20)
if (DEBUG): print('#### Calling update_flow_depth()...')
self.update_flow_depth()
#------------------------------------------------------------
if (self.FLOOD_OPTION):
if (DEBUG): print('#### Calling update_flood_depth()...')
self.update_flood_depth() ############ (2019-09-20)
#-----------------------------------------------------------------
if not(self.DYNAMIC_WAVE):
if (DEBUG): print('#### Calling update_trapezoid_Rh()...')
self.update_trapezoid_Rh()
# print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
#-----------------------------------------------------------------
# (9/9/14) Moved this here from update_velocity() methods.
#-----------------------------------------------------------------
if not(self.KINEMATIC_WAVE):
if (DEBUG): print('#### Calling update_free_surface_slope()...')
self.update_free_surface_slope()
if (DEBUG): print('#### Calling update_shear_stress()...')
self.update_shear_stress()
if (DEBUG): print('#### Calling update_shear_speed()...')
self.update_shear_speed()
#-----------------------------------------------------------------
# Must update friction factor before velocity for DYNAMIC_WAVE.
#-----------------------------------------------------------------
if (DEBUG): print('#### Calling update_friction_factor()...')
self.update_friction_factor()
#-----------------------------------------------------------------
if (DEBUG): print('#### Calling update_velocity()...')
self.update_velocity()
self.update_velocity_on_edges() # (set to zero)
if (DEBUG): print('#### Calling update_froude_number()...')
self.update_froude_number()
#-----------------------------------------------------------------
## print 'Rmin, Rmax =', self.R.min(), self.R.max()
## print 'Qmin, Qmax =', self.Q.min(), self.Q.max()
## print 'umin, umax =', self.u.min(), self.u.max()
## print 'dmin, dmax =', self.d.min(), self.d.max()
## print 'nmin, nmax =', self.nval.min(), self.nval.max()
## print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
## print 'Smin, Smax =', self.S_bed.min(), self.S_bed.max()
if (DEBUG): print('#### Calling update_outlet_values()...')
self.update_outlet_values()
if (DEBUG): print('#### Calling update_peak_values()...')
self.update_peak_values()
if (DEBUG): print('#### Calling update_Q_out_integral()...')
self.update_Q_out_integral()
#---------------------------------------------
# This takes extra time and is now done
# only at the end, in finalize(). (8/19/13)
#---------------------------------------------
# But then "topoflow_driver" doesn't get
# correctly updated values for some reason.
#---------------------------------------------
## self.update_mins_and_maxes()
#--------------------------------------------------
# Check computed values (but not if known stable)
#--------------------------------------------------
if (self.CHECK_STABILITY):
D_OK = self.check_flow_depth()
U_OK = self.check_flow_velocity()
OK = (D_OK and U_OK)
else:
OK = True
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
# Components use own self.time_sec by default.
#-----------------------------------------------
if (DEBUG): print('#### Calling write_output_files()...')
self.write_output_files()
## self.write_output_files( time_seconds )
#-----------------------------
# Update internal clock
# after write_output_files()
#-----------------------------
if (DEBUG): print('#### Calling update_time()')
self.update_time( dt )
if (OK):
self.status = 'updated' # (OpenMI 2.0 convention)
else:
self.status = 'failed'
self.DONE = True
# update()
#-------------------------------------------------------------------
def finalize(self):
#---------------------------------------------------
# We can compute mins and maxes in the final grids
# here, but the framework will not then pass them
# to any component (e.g. topoflow_driver) that may
# need them.
#---------------------------------------------------
self.update_total_channel_water_volume() ## (9/17/19)
self.update_total_land_water_volume() ## (9/17/19)
## self.update_total_edge_water_volume() ## (5/7/20)
self.update_mins_and_maxes( REPORT=False ) ## (2/6/13)
self.print_final_report(comp_name='Channels component')
self.status = 'finalizing' # (OpenMI)
self.close_input_files() # TopoFlow input "data streams"
self.close_output_files()
self.status = 'finalized' # (OpenMI)
# finalize()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
#---------------------------------------------------------------
# Note: The initialize() method calls initialize_config_vars()
# (in BMI_base.py), which calls this method at the end.
#--------------------------------------------------------------
cfg_extension = self.get_attribute( 'cfg_extension' ).lower()
# cfg_extension = self.get_cfg_extension().lower()
self.KINEMATIC_WAVE = ("kinematic" in cfg_extension)
self.DIFFUSIVE_WAVE = ("diffusive" in cfg_extension)
self.DYNAMIC_WAVE = ("dynamic" in cfg_extension)
#-------------------------------------------
# These currently can't be set to anything
# else in the GUI, but need to be defined.
#-------------------------------------------
self.code_type = 'Grid'
self.slope_type = 'Grid' # (shouldn't need this)
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)
# set_computed_input_vars()
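#-------------------------------------------------------------------
# Example (hypothetical filename): the wave-routing flags above
# come purely from substring tests on the CFG extension, e.g.
#-------------------------------------------------------------------
# ext = '_channels_kinematic_wave.cfg'.lower()
# ('kinematic' in ext)    # => True,  so KINEMATIC_WAVE = True
# ('diffusive' in ext)    # => False, so DIFFUSIVE_WAVE = False
# ('dynamic'   in ext)    # => False, so DYNAMIC_WAVE   = False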
#-------------------------------------------------------------------
def initialize_d8_vars(self):
#---------------------------------------------
# Compute and store a variety of (static) D8
# flow grid variables. Embed structure into
# the "channel_base" component.
#---------------------------------------------
self.d8 = d8_base.d8_component()
#--------------------------------------------------
# D8 component builds its cfg filename from these
#-------------------------------------------------------------
# (2/11/2017) The initialize() method in d8_base.py now
# uses case_prefix (vs. site_prefix) for its CFG file:
# <case_prefix>_d8_global.cfg. This is to prevent confusion
# since this was the only CFG file that used site_prefix.
#-------------------------------------------------------------
# Note: This D8 component is serving a channels component
# that has already been instantiated and knows its
# directory and prefix information. So we can build
# the correct D8 cfg_file name from that info. It
# will then read path_info CFG file to get other info.
#-------------------------------------------------------------
cfg_file = (self.case_prefix + '_d8_global.cfg')
cfg_file = (self.cfg_directory + cfg_file)
self.d8.initialize( cfg_file=cfg_file, SILENT=self.SILENT, \
REPORT=self.REPORT )
# self.d8.site_prefix = self.site_prefix
# self.d8.case_prefix = self.case_prefix # (used in d8_base.py)
# self.d8.in_directory = self.in_directory
# self.d8.initialize( cfg_file=None, SILENT=self.SILENT, \
# REPORT=self.REPORT )
#---------------------------------------------------
# The next 2 "update" calls are needed when we use
# the new "d8_base.py", but are not needed when
# using the older "tf_d8_base.py".
#---------------------------------------------------
self.d8.update(self.time, SILENT=False, REPORT=True)
#-----------------------------------------------------------
# Note: This is also needed, but is not done by default in
# d8.update() because it hurts performance of Erode.
#-----------------------------------------------------------
self.d8.update_noflow_IDs()
#---------------------------------------------------
# Initialize separate set of d8 vars for flooding.
# (2019-09-21)
#---------------------------------------------------
if (self.FLOOD_OPTION):
d8f = copy.copy( self.d8 ) # (or use "copy.deepcopy"?)
d8f.FILL_PITS_IN_Z0 = False
d8f.LINK_FLATS = False
self.d8f = d8f
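#---------------------------------------------------
# Note: copy.copy() is a *shallow* copy, so d8f
# initially shares its grids (e.g. the DEM) with
# d8; they are only replaced when d8f methods like
# update_flow_grid() rebind them in
# update_flood_d8_vars().  Use copy.deepcopy() if
# fully independent grids are ever needed.
#---------------------------------------------------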
# initialize_d8_vars()
#-------------------------------------------------------------
def initialize_computed_vars(self):
#--------------------------------------------------------
# (5/17/12) If MANNING, we need to set z0vals to -1 so
# they are always defined for use with EMELI framework.
#--------------------------------------------------------
# BMI_base.read_config_file() reads "float" scalars as
# numpy "float64" data type. Applying np.float64()
# will break references.
#--------------------------------------------------------
if (self.MANNING):
if (self.nval is not None):
self.nval_min = self.nval.min()
self.nval_max = self.nval.max()
#---------------------------------------
print(' min(nval) = ' + str(self.nval_min) )
print(' max(nval) = ' + str(self.nval_max) )
#---------------------------------------
self.z0val = self.initialize_scalar(-1, dtype='float64')
self.z0val_min = self.initialize_scalar(-1, dtype='float64')
self.z0val_max = self.initialize_scalar(-1, dtype='float64')
if (self.LAW_OF_WALL):
if (self.z0val is not None):
self.z0val_min = self.z0val.min()
self.z0val_max = self.z0val.max()
#-----------------------------------------
print(' min(z0val) = ' + str(self.z0val_min) )
print(' max(z0val) = ' + str(self.z0val_max) )
#-----------------------------------------
self.nval = self.initialize_scalar(-1, dtype='float64')
self.nval_min = self.initialize_scalar(-1, dtype='float64')
self.nval_max = self.initialize_scalar(-1, dtype='float64')
#------------------------------------------------------------
# If neither set, use a constant velocity? (Test: 5/18/15)
#------------------------------------------------------------
if not(self.MANNING) and not(self.LAW_OF_WALL):
print('#### WARNING: In CFG file, MANNING=0 and LAW_OF_WALL=0.')
#-----------------------------------
self.z0val = self.initialize_scalar(-1, dtype='float64')
self.z0val_min = self.initialize_scalar(-1, dtype='float64')
self.z0val_max = self.initialize_scalar(-1, dtype='float64')
#--------------------------------------------------------------
self.nval = self.initialize_scalar(-1, dtype='float64')
self.nval_min = self.initialize_scalar(-1, dtype='float64')
self.nval_max = self.initialize_scalar(-1, dtype='float64')
#-----------------------------------------------
# Convert bank angles from degrees to radians.
#-------------------------------------------------
# When bank angles are given as a GRID, this is
# done in read_input_files(). We later realized that
# the conversion didn't occur for a SCALAR angle,
# which caused "denom" to be negative downstream.
# (Fixed on: 2019-10-08.)
#-------------------------------------------------
### if (np.size( self.angle ) == 1):
if (self.angle_type.lower() == 'scalar'):
self.angle *= self.deg_to_rad # [radians]
#-----------------------------------------------
# Print mins and maxes of some other variables
# that were initialized by read_input_files().
#-----------------------------------------------
# print(' min(slope) = ' + str(self.slope.min()) )
# print(' max(slope) = ' + str(self.slope.max()) )
print(' min(width) = ' + str(self.width.min()) )
print(' max(width) = ' + str(self.width.max()) )
print(' min(angle) = ' + str(self.angle.min() * self.rad_to_deg) + ' [deg]')
print(' max(angle) = ' + str(self.angle.max() * self.rad_to_deg) + ' [deg]')
print(' min(sinuosity) = ' + str(self.sinu.min()) )
print(' max(sinuosity) = ' + str(self.sinu.max()) )
print(' min(init_depth) = ' + str(self.d0.min()) )
print(' max(init_depth) = ' + str(self.d0.max()) )
#------------------------------------------------
# 8/29/05. Multiply ds by (unitless) sinuosity
# Orig. ds is used by subsurface flow
#------------------------------------------------
# NB! We should also divide slopes in S_bed by
# the sinuosity, as now done here.
#----------------------------------------------------
# NB! This saves a modified version of ds that
# is only used within the "channels" component.
# The original "ds" is stored within the
# topoflow model component and is used for
# subsurface flow, etc.
#----------------------------------------------------
### self.d8.ds_chan = (self.sinu * ds)
### self.ds = (self.sinu * self.d8.ds)
self.d8.ds = (self.sinu * self.d8.ds) ### USE LESS MEMORY
###################################################
###################################################
### S_bed = (S_bed / self.sinu) #*************
self.slope = (self.slope / self.sinu)
self.S_bed = self.slope
self.S_free = self.S_bed.copy() # (2020-04-29)
###################################################
###################################################
#---------------------------
# Initialize spatial grids
#-----------------------------------------------
# NB! It is not a good idea to initialize the
# water depth grid to a nonzero scalar value.
#-----------------------------------------------
print('Initializing u, f, d grids...')
self.u = self.initialize_grid( 0, dtype='float64' )
self.f = self.initialize_grid( 0, dtype='float64' )
self.d = self.initialize_grid( 0, dtype='float64' )
self.d += self.d0 # (Add initial depth, if any.)
#------------------------------------------
# Use a constant velocity (Test: 5/18/15)
#------------------------------------------
# if not(self.MANNING) and not(self.LAW_OF_WALL):
# ## self.u[:] = 1.5 # [m/s]
# self.u[:] = 3.0 # [m/s]
#########################################################
# Add this on (2/3/13) so make the TF driver happy
# during its initialize when it gets reference to R.
# But in "update_R()", be careful not to break the ref.
# "Q" may be subject to the same issue.
#########################################################
self.Qc = self.initialize_grid( 0, dtype='float64' )
self.R = self.initialize_grid( 0, dtype='float64' )
#-----------------------------------------
# Added these new variables for flooding
#-----------------------------------------
self.d_flood = self.initialize_grid( 0, dtype='float64' ) #(9/16/19)
if (self.FLOOD_OPTION):
self.Qf = self.initialize_grid( 0, dtype='float64' ) #(9/20/19)
self.Q = self.initialize_grid( 0, dtype='float64' ) #(total)
else:
self.Q = self.Qc # (2 names for same thing)
##############################################################################
# seconds_per_year = 3600 * 24 * 365 = 31,536,000
# mm_per_meter = 1000
##############################################################################
# baseflow_rate = 250.0 # [mm per year], was 230.0
# baseflow_rate_mps = baseflow_rate / (31536000.0 * 1000.0) #[m/s]
# self.GW_init = np.zeros([self.ny, self.nx], dtype='Float64')
# self.GW_init += baseflow_rate_mps
##############################################################################
#---------------------------------------------------
# Initialize new grids. Is this needed? (9/13/14)
#---------------------------------------------------
self.tau = self.initialize_grid( 0, dtype='float64' )
self.u_star = self.initialize_grid( 0, dtype='float64' )
self.froude = self.initialize_grid( 0, dtype='float64' )
#---------------------------------------
# These are used to check mass balance
#---------------------------------------
self.vol_R = self.initialize_scalar( 0, dtype='float64')
self.vol_Q = self.initialize_scalar( 0, dtype='float64')
self.vol_chan = self.initialize_scalar( 0, dtype='float64')
self.vol_land = self.initialize_scalar( 0, dtype='float64')
#-------------------------------------------
# Make sure all slopes are valid & nonzero
# since otherwise flow will accumulate
#-------------------------------------------
if (self.KINEMATIC_WAVE):
self.remove_bad_slopes() #(3/8/07. Only Kin Wave case)
#----------------------------------------------
# Use "get_new_slope_grid()" in new_slopes.py
# instead of "remove_bad_slopes()".
# Or change "slope_grid" in the CFG file.
#----------------------------------------------
## self.get_new_slope_grid()
#----------------------------------------
# Initial volume of water in each pixel
#-----------------------------------------------------------
# Note: angles were read as degrees & converted to radians
#-----------------------------------------------------------
L2 = self.d * np.tan(self.angle)
self.A_wet = self.d * (self.width + L2)
self.P_wet = self.width + (np.float64(2) * self.d / np.cos(self.angle) )
self.vol = self.A_wet * self.d8.ds # [m3]
#---------------------------------------------------------
# Volume of water in channel when bankfull (2019-09-16)
# Note that w_bankfull is not used here, but:
# w_bankfull = width + (2 * d_bankfull * tan(angle))
# width = w_bankfull - (2 * d_bankfull * tan(angle))
#---------------------------------------------------------
L3 = self.d_bankfull * np.tan(self.angle)
Ac_bankfull = self.d_bankfull * (self.width + L3)
self.vol_bankfull = Ac_bankfull * self.d8.ds
self.vol_flood = self.initialize_grid( 0, dtype='float64')
#-------------------------------------------------------
# Note: depth is often zero at the start of a run, and
# both width and then P_wet are also zero in places.
# Therefore initialize Rh as shown.
#-------------------------------------------------------
self.Rh = self.initialize_grid( 0, dtype='float64' )
## self.Rh = self.A_wet / self.P_wet # [m]
## print 'P_wet.min() =', self.P_wet.min()
## print 'width.min() =', self.width.min()
## self.initialize_diversion_vars() # (9/22/14)
self.initialize_outlet_values()
self.initialize_peak_values()
self.initialize_min_and_max_values() ## (2/3/13)
## w = np.where( self.width <= 0 )
## nw = np.size( w[0] ) # (This is correct for 1D or 2D.)
## if (nw > 0):
## print 'WARNING:'
## print 'Number of locations where width==0 =', nw
## if (nw < 10):
## print 'locations =', w
## print ' '
# initialize_computed_vars()
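#-------------------------------------------------------------------
# Worked check of the trapezoid formulas above (illustration
# only, made-up numbers):  d=1 [m], width=3 [m], angle=45 [deg],
# so tan(angle)=1 and
#     L2    = 1 * 1              = 1.0   [m]
#     A_wet = 1 * (3 + 1)        = 4.0   [m^2]
#     P_wet = 3 + 2/cos(45 deg)  ~ 5.83  [m]
# and for ds=100 [m],  vol = A_wet * ds = 400 [m^3].
#-------------------------------------------------------------------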
#-------------------------------------------------------------
def initialize_diversion_vars(self):
#-----------------------------------------
# Compute source IDs from xy coordinates
#-----------------------------------------
source_rows = np.int32( self.sources_y / self.ny )
source_cols = np.int32( self.sources_x / self.nx )
self.source_IDs = (source_rows, source_cols)
## self.source_IDs = (source_rows * self.nx) + source_cols
#---------------------------------------
# Compute sink IDs from xy coordinates
#---------------------------------------
sink_rows = np.int32( self.sinks_y / self.ny )
sink_cols = np.int32( self.sinks_x / self.nx )
self.sink_IDs = (sink_rows, sink_cols)
## self.sink_IDs = (sink_rows * self.nx) + sink_cols
#-------------------------------------------------
# Compute canal entrance IDs from xy coordinates
#-------------------------------------------------
canal_in_rows = np.int32( self.canals_in_y / self.ny )
canal_in_cols = np.int32( self.canals_in_x / self.nx )
self.canal_in_IDs = (canal_in_rows, canal_in_cols)
## self.canal_in_IDs = (canal_in_rows * self.nx) + canal_in_cols
#---------------------------------------------
# Compute canal exit IDs from xy coordinates
#---------------------------------------------
canal_out_rows = np.int32( self.canals_out_y / self.ny )
canal_out_cols = np.int32( self.canals_out_x / self.nx )
self.canal_out_IDs = (canal_out_rows, canal_out_cols)
## self.canal_out_IDs = (canal_out_rows * self.nx) + canal_out_cols
#--------------------------------------------------
# This will be computed from Q_canal_fraction and
# self.Q and then passed back to Diversions
#--------------------------------------------------
self.Q_canals_in = np.array( self.n_sources, dtype='float64' )
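#--------------------------------------------------------------
# Note: np.array( self.n_sources, ... ) above creates a 0-d
# array whose *value* is n_sources, not an array with one
# entry per source; np.zeros( self.n_sources, dtype='float64' )
# would give the latter.  This method is currently commented
# out in initialize(), so the line is never executed.
#--------------------------------------------------------------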
# initialize_diversion_vars()
#-------------------------------------------------------------------
def initialize_outlet_values(self):
#---------------------------------------------------
# Note: These are retrieved and used by TopoFlow
# for the stopping condition. TopoFlow
# receives a reference to these, but in
# order to see the values change they need
# to be stored as mutable, 1D numpy arrays.
#---------------------------------------------------
# Note: Q_last is internal to TopoFlow.
#---------------------------------------------------
# self.Q_outlet = self.Q[ self.outlet_ID ]
self.Q_outlet = self.initialize_scalar(0, dtype='float64')
self.u_outlet = self.initialize_scalar(0, dtype='float64')
self.d_outlet = self.initialize_scalar(0, dtype='float64')
self.f_outlet = self.initialize_scalar(0, dtype='float64')
# initialize_outlet_values()
#-------------------------------------------------------------------
def initialize_peak_values(self):
#-------------------------
# Initialize peak values
#-------------------------
self.Q_peak = self.initialize_scalar(0, dtype='float64')
self.T_peak = self.initialize_scalar(0, dtype='float64')
self.u_peak = self.initialize_scalar(0, dtype='float64')
self.Tu_peak = self.initialize_scalar(0, dtype='float64')
self.d_peak = self.initialize_scalar(0, dtype='float64')
self.Td_peak = self.initialize_scalar(0, dtype='float64')
# initialize_peak_values()
#-------------------------------------------------------------------
def initialize_min_and_max_values(self):
#-------------------------------
# Initialize min & max values
# (2/3/13), for new framework.
#-------------------------------
v = 1e6
self.Q_min = self.initialize_scalar(v, dtype='float64')
self.Q_max = self.initialize_scalar(-v, dtype='float64')
self.u_min = self.initialize_scalar(v, dtype='float64')
self.u_max = self.initialize_scalar(-v, dtype='float64')
self.d_min = self.initialize_scalar(v, dtype='float64')
self.d_max = self.initialize_scalar(-v, dtype='float64')
# initialize_min_and_max_values()
#-------------------------------------------------------------------
def update_flood_d8_vars(self):
#---------------------------------------------------------
# Note: Use free-surface gradient of d_flood to compute
# flow to neighbors. (2019-09-17)
#---------------------------------------------------------
# Note: self.d_flood is used to compute self.Q.
#---------------------------------------------------------
self.FLOODING = (self.d_flood.max() > 0)
if not(self.FLOODING):
self.d8f = copy.copy( self.d8 )
self.d8f.FILL_PITS_IN_Z0 = False
self.d8f.LINK_FLATS = False
return
#--------------------------------------------------------
# Use (DEM + d_flood) to compute a free-surface gradient
# and update all of the D8 vars.
#--------------------------------------------------------
z_free = (self.d8.DEM + self.d_flood)
#---------------------------------------
self.d8f.update_flow_grid( DEM=z_free ) ######
self.d8f.update_parent_ID_grid()
self.d8f.update_parent_IDs() # (needed for gradients)
self.d8f.update_flow_from_IDs()
self.d8f.update_flow_to_IDs()
self.d8f.update_noflow_IDs() # (needed to fill depressions naturally)
self.d8f.update_flow_width_grid() # (dw)
self.d8f.update_flow_length_grid() # (ds)
### self.d8f.update_area_grid()
#----------------------------------------
# self.d8f.d8_grid gives the D8 flow codes
# update_flood_d8_vars()
#-------------------------------------------------------------------
# def update_excess_rainrate(self):
def update_R(self):
#----------------------------------------
# Compute the "excess rainrate", R.
# Each term must have same units: [m/s]
# Sum = net gain/loss rate over pixel.
#----------------------------------------------------
# R can be positive or negative. If negative, then
# water is removed from the surface at rate R until
# surface water is consumed.
#--------------------------------------------------------------
# P = precip_rate [m/s] (converted by read_input_data()).
# SM = snowmelt rate [m/s]
# GW = seep rate [m/s] (water_table intersects surface)
# ET = evap rate [m/s]
# IN = infil rate [m/s]
# MR = icemelt rate [m/s]
#------------------------------------------------------------
# Use refs to other comp vars from new framework. (5/18/12)
#------------------------------------------------------------
P = self.P_rain # (This is now liquid-only precip. 9/14/14)
SM = self.SM
GW = self.GW
### GW = self.GW_init
ET = self.ET
IN = self.IN
MR = self.MR
## if (self.DEBUG):
## print 'At time:', self.time_min, ', P =', P, '[m/s]'
#--------------
# For testing
#--------------
# print( '(Pmin, Pmax) = ' + str(P.min()) + ', ' + str(P.max()) )
# print( '(SMmin, SMmax) = ' + str(SM.min()) + ', ' + str(SM.max()) )
# print( '(GWmin, GWmax) = ' + str(GW.min()) + ', ' + str(GW.max()) )
# print( '(ETmin, ETmax) = ' + str(ET.min()) + ', ' + str(ET.max()) )
# print( '(INmin, INmax) = ' + str(IN.min()) + ', ' + str(IN.max()) )
# print( '(MRmin, MRmax) = ' + str(MR.min()) + ', ' + str(MR.max()) )
# print( ' ' )
self.R = (P + SM + GW + MR) - (ET + IN)
# update_R()
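#-------------------------------------------------------------------
# Unit check (illustration only):  a rainrate of 10 [mm/hr] is
# 0.01/3600 ~ 2.78e-6 [m/s].  With an infiltration rate of
# 5 [mm/hr] ~ 1.39e-6 [m/s] and all other terms zero:
#     R = 2.78e-6 - 1.39e-6 ~ 1.39e-6 [m/s]   (a net gain)
#-------------------------------------------------------------------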
#-------------------------------------------------------------------
def update_R_integral(self):
#-----------------------------------------------
# Update mass total for R, sum over all pixels
#---------------------------------------------------------------
# Note: Typically, chan_dt < met_dt, so that vol_R is updated
# more frequently than vol_P. Since EMELI performs linear
# interpolation in time, integrals may be slightly different.
#---------------------------------------------------------------
volume = np.double(self.R * self.da * self.dt) # [m^3]
if (np.size(volume) == 1):
self.vol_R += (volume * self.rti.n_pixels)
else:
self.vol_R += np.sum(volume)
# update_R_integral()
#-------------------------------------------------------------------
def update_channel_discharge(self):
#---------------------------------------------------------
# The discharge grid, Q, gives the flux of water _out_
# of each grid cell. This entire amount then flows
# into one of the 8 neighbor grid cells, as indicated
# by the D8 flow code. The update_flow_volume() function
# is called right after this one in update() and uses
# the Q grid.
#---------------------------------------------------------
# 7/15/05. The cross-sectional area of a trapezoid is
# given by: Ac = d * (w + (d * tan(theta))),
# where w is the bottom width. If we were to
# use: Ac = w * d, then we'd have Ac=0 when w=0.
# We also need angle units to be radians.
#---------------------------------------------------------
#-----------------------------
# Compute the discharge grid
#------------------------------------------------------
# A_wet is initialized in initialize_computed_vars().
# A_wet is updated in update_trapezoid_Rh().
#------------------------------------------------------
self.Qc[:] = self.u * self.A_wet ## (2/19/13, in place)
#--------------
# For testing
#--------------
## print '(umin, umax) =', self.u.min(), self.u.max()
## print '(d0min, d0max) =', self.d0.min(), self.d0.max()
## print '(dmin, dmax) =', self.d.min(), self.d.max()
## print '(amin, amax) =', self.angle.min(), self.angle.max()
## print '(wmin, wmax) =', self.width.min(), self.width.max()
## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max()
## print '(L2min, L2max) =', L2.min(), L2.max()
## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max()
#--------------
# For testing
#--------------
# print 'dmin, dmax =', self.d.min(), self.d.max()
# print 'umin, umax =', self.u.min(), self.u.max()
# print 'Qmin, Qmax =', self.Q.min(), self.Q.max()
# print ' '
# print 'u(outlet) =', self.u[self.outlet_ID]
# print 'Q(outlet) =', self.Q[self.outlet_ID] ########
#----------------------------------------------------
# Wherever depth is less than z0, assume that water
# is not flowing and set u and Q to zero.
# However, we also need (d > 0) to avoid a divide
# by zero problem, even when numerators are zero.
#----------------------------------------------------
# FLOWING = (d > (z0/aval))
#*** FLOWING[self.d8.noflow_IDs] = False ;******
# u = (u * FLOWING)
# Q = (Q * FLOWING)
# d = np.maximum(d, 0.0)   # (allow depths < z0, if > 0)
# update_channel_discharge()
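#-------------------------------------------------------------------
# Example (illustration only):  with u = 0.5 [m/s] and the
# wetted area from the trapezoid example above (A_wet = 4 [m^2]):
#     Qc = u * A_wet = 0.5 * 4.0 = 2.0 [m^3/s]
#-------------------------------------------------------------------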
#-------------------------------------------------------------------
def update_flood_discharge(self):
### if not(self.FLOODING):
### return
#------------------------------------------
# Find grid cells with & without flooding
#------------------------------------------
w1 = (self.d_flood > 0) # (array of True or False)
w2 = np.invert( w1 )
#---------------------------------------------------
# (2019-09-16) Add discharge due to overbank flow
# See manning_formula() function in this file.
#---------------------------------------------------
uf = (self.u / 5.0)
Af = (self.d8f.dw * self.d_flood) ###### CHECK dw
self.Qf[ w1 ] = uf[ w1 ] * Af[ w1 ] # (in place)
self.Qf[ w2 ] = 0.0
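#---------------------------------------------------------
# Note: uf = (u / 5.0) above treats the overbank flow
# speed as a fixed fraction of the in-channel speed.
# The factor 5.0 appears to be an ad hoc choice
# (overbank flow is slower due to shallower depth and
# higher roughness); see manning_formula() in this file.
#---------------------------------------------------------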
# update_flood_discharge()
#-------------------------------------------------------------------
def update_discharge(self):
#------------------------------------------------------------
# Note: This is not finished yet. The fact that channel
# flow and overbank flooding flow can have different
# D8 flow directions, with the flooding flow direction
# switching back and forth, can result in an oscillation
# or spikiness in the hydrograph. It is not yet
# clear how to best handle this. Reducing the timestep
# does not seem to resolve the issue. However, flood
# depths seem to be well-behaved.
#------------------------------------------------------------
if (self.FLOOD_OPTION):
#------------------------------------------
# Look at where the 2 D8 flow grids align
# First part here with w1 is solid.
#------------------------------------------
# w1 = (self.d8.d8_grid == self.d8f.d8_grid)
# w2 = np.invert( w1 )
## self.Q[ w1 ] = self.Qc[ w1 ] + self.Qf[ w1 ]
#--------------------------------------------------
# Not sure how to handle w2 grid cells. This
# just makes it easy to see where the D8 flow
# directions differ, at places in main channels.
#--------------------------------------------------
## self.Q[ w2 ] = 0.0
# This part with w1 is also solid.
w1 = (self.Qf == 0)
w2 = np.invert( w1 )
self.Q[ w1 ] = self.Qc[ w1 ]
#----------------------------------------------------
# This is not 100% correct, since the D8 flow grids
# are not the same for the channel and flood flows.
#----------------------------------------------------
## self.Q[:] = self.Qc + self.Qf
#---------------------------------------------------------
# This gives smoother hydrographs in main channels (with
# some spikes still), but has Q=0 for most grid cells.
#---------------------------------------------------------
## self.Q[:] = self.Qf
#-------------------------------------------------
# A compromise, but hydrograph still has spikes,
# even with timestep of 1 second for Treynor.
#-------------------------------------------------
# np.maximum( self.Qc, self.Qf, self.Q) # (in place)
#---------------------------------------------------
# Average with previous time step to smooth spikes,
# thought to be due to switching of the flow direction.
# Hydrographs are much smoother.
#----------------------------------------------------
Q2 = (self.Qc[ w2 ] + self.Qf[ w2 ])
Q3 = (self.Q[ w2 ] + Q2) / 2.0
self.Q[ w2 ] = Q3
### self.Q[ w2 ] = (self.Qc + self.Qf) / 2.0 # (in place)
# For another idea
## self.Q[ self.d8f.parent_IDs ]
### self.Q[ w2 ] = self.Qc[ w2 ] + self.Qf[ w2 ]/2.0 ###############
### self.Q[ w2 ] = self.Qc[ w2 ] ####################
## self.Q[:] = self.Qc + self.Qf
else:
# Set self.Q = self.Qc in initialize().
dum = 0
# update_discharge()
#-------------------------------------------------------------------
def update_diversions(self):
#--------------------------------------------------------------
# Note: The Channel component requests the following input
# vars from the Diversions component by including
# them in its "get_input_vars()":
# (1) Q_sources, Q_sources_x, Q_sources_y
# (2) Q_sinks, Q_sinks_x, Q_sinks_y
# (3) Q_canals_out, Q_canals_out_x, Q_canals_out_y
# (4) Q_canals_fraction, Q_canals_in_x, Q_canals_in_y.
# source_IDs are computed from (x,y) coordinates during
# initialize().
#
# Diversions component needs to get Q_canals_in from the
# Channel component.
#--------------------------------------------------------------
# Note: This *must* be called after update_discharge() and
# before update_flow_volume().
#--------------------------------------------------------------
# Note: The Q grid stores the volume flow rate *leaving* each
# grid cell in the domain. For sources, an extra amount
# is leaving the cell which can flow into its D8 parent
# cell. For sinks, a lesser amount is leaving the cell
# toward the D8 parent.
#--------------------------------------------------------------
# Note: It is not enough to just update Q and then call the
# update_flow_volume() method. This is because it
# won't update the volume in the channels in the grid
# cells that the extra discharge is leaving from.
#--------------------------------------------------------------
# If a grid cell contains a "source", then an additional Q
# will flow *into* that grid cell and increase flow volume.
#--------------------------------------------------------------
#-------------------------------------------------------------
# This is not fully tested but runs. However, the Diversion
# vars are still computed even when Diversions component is
# disabled. So it slows things down somewhat.
#-------------------------------------------------------------
return
########################
########################
#----------------------------------------
# Update Q and vol due to point sources
#----------------------------------------
## if (hasattr(self, 'source_IDs')):
if (self.n_sources > 0):
self.Q[ self.source_IDs ] += self.Q_sources
self.vol[ self.source_IDs ] += (self.Q_sources * self.dt)
#--------------------------------------
# Update Q and vol due to point sinks
#--------------------------------------
## if (hasattr(self, 'sink_IDs')):
if (self.n_sinks > 0):
self.Q[ self.sink_IDs ] -= self.Q_sinks
self.vol[ self.sink_IDs ] -= (self.Q_sinks * self.dt)
#---------------------------------------
# Update Q and vol due to point canals
#---------------------------------------
## if (hasattr(self, 'canal_in_IDs')):
if (self.n_canals > 0):
#-----------------------------------------------------------------
# Q grid was just modified. Apply the canal diversion fractions
# to compute the volume flow rate into upstream ends of canals.
#-----------------------------------------------------------------
Q_canals_in = self.Q_canals_fraction * self.Q[ self.canal_in_IDs ]
self.Q_canals_in = Q_canals_in
#----------------------------------------------------
# Update Q and vol due to losses at canal entrances
#----------------------------------------------------
self.Q[ self.canal_in_IDs ] -= Q_canals_in
self.vol[ self.canal_in_IDs ] -= (Q_canals_in * self.dt)
#-------------------------------------------------
# Update Q and vol due to gains at canal exits.
# Diversions component accounts for travel time.
#-------------------------------------------------
self.Q[ self.canal_out_IDs ] += self.Q_canals_out
self.vol[ self.canal_out_IDs ] += (self.Q_canals_out * self.dt)
# update_diversions()
#-------------------------------------------------------------------
def update_flow_volume(self):
#-----------------------------------------------------------
# Notes: This function must be called after
# update_discharge() and update_diversions().
#-----------------------------------------------------------
# Notes: Q = surface discharge [m^3/s]
# R = excess precip. rate [m/s]
# da = pixel area [m^2]
# dt = channel flow timestep [s]
# vol = total volume of water in pixel [m^3]
# v2 = temp version of vol
# w1 = IDs of pixels that...
# p1 = IDs of parent pixels that...
#-----------------------------------------------------------
dt = self.dt # [seconds]
#----------------------------------------------------
# Add contribution (or loss ?) from excess rainrate
#----------------------------------------------------
# Contributions over entire grid cell from rainfall,
# snowmelt, icemelt and baseflow (minus losses from
# evaporation and infiltration) are assumed to flow
# into the channel within the grid cell.
# Note that R is allowed to be negative.
#----------------------------------------------------
self.vol += (self.R * self.da) * dt # (in place)
#-----------------------------------------
# Add contributions from neighbor pixels
#-------------------------------------------------------------
# Each grid cell passes flow to *one* downstream neighbor.
# Note that multiple grid cells can flow toward a given grid
# cell, so a grid cell ID may occur in d8.p1 and d8.p2, etc.
#-------------------------------------------------------------
# (2/16/10) RETEST THIS. Before, a copy called "v2" was
# used but this doesn't seem to be necessary.
#-------------------------------------------------------------
if (self.d8.p1_OK):
self.vol[ self.d8.p1 ] += (dt * self.Qc[self.d8.w1])
if (self.d8.p2_OK):
self.vol[ self.d8.p2 ] += (dt * self.Qc[self.d8.w2])
if (self.d8.p3_OK):
self.vol[ self.d8.p3 ] += (dt * self.Qc[self.d8.w3])
if (self.d8.p4_OK):
self.vol[ self.d8.p4 ] += (dt * self.Qc[self.d8.w4])
if (self.d8.p5_OK):
self.vol[ self.d8.p5 ] += (dt * self.Qc[self.d8.w5])
if (self.d8.p6_OK):
self.vol[ self.d8.p6 ] += (dt * self.Qc[self.d8.w6])
if (self.d8.p7_OK):
self.vol[ self.d8.p7 ] += (dt * self.Qc[self.d8.w7])
if (self.d8.p8_OK):
self.vol[ self.d8.p8 ] += (dt * self.Qc[self.d8.w8])
#----------------------------------------------------
# Subtract the amount that flows out to D8 neighbor
#----------------------------------------------------
self.vol -= (self.Qc * dt) # (in place)
#--------------------------------------------------------
# While R can be positive or negative, the surface flow
# volume must always be nonnegative. This also ensures
# that the flow depth is nonnegative. (7/13/06)
#--------------------------------------------------------
## self.vol = np.maximum(self.vol, 0.0)
## self.vol[:] = np.maximum(self.vol, 0.0) # (2/19/13)
np.maximum( self.vol, 0.0, self.vol ) # (in place)
# update_flow_volume
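#-------------------------------------------------------------------
# Mass-balance sketch of the update above, for one cell with a
# single upstream neighbor (illustration only, made-up numbers):
#     vol_new = vol_old + dt*(R*da) + dt*Q_in - dt*Q_out
# e.g. vol_old=400 [m^3], dt=10 [s], R*da=0.01, Q_in=1.5,
# Q_out=2.0 [m^3/s]:
#     vol_new = 400 + 0.1 + 15 - 20 = 395.1 [m^3]
#-------------------------------------------------------------------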
#-------------------------------------------------------------------
def update_flood_volume(self):
### if not(self.FLOODING):
### return
dt = self.dt # [seconds]
#---------------------------------------------------------
# Excess water volume from overbank flow acts as a source
# of water in the cell, adding to whatever volume of
# water is already there. Channel volume at bankfull,
# called vol_bankfull, is computed in initialize().
# D8 child cells with a higher free-surface may add to
# the amount in a cell, and this total is reduced by
# whatever amount flows to the D8 parent cell.
#----------------------------------------------------------
dvol = (self.vol - self.vol_bankfull)
self.vol_flood += np.maximum(dvol, 0.0)
### np.maximum( dvol, 0.0, self.vol_flood) # (in place)
#--------------------------------------------------------------
# Wherever vol > vol_bankfull, the channel volume computed
# by update_flow_volume() is wrong and should instead be
# the bankfull volume. Extra water volume is put into d_flood.
#--------------------------------------------------------------
np.minimum(self.vol, self.vol_bankfull, self.vol ) # (in place)
#-----------------------------------------
# Add contributions from neighbor pixels
#-------------------------------------------------------------
# Each grid cell passes flow to *one* downstream neighbor.
# Note that multiple grid cells can flow toward a given grid
# cell, so a grid cell ID may occur in d8.p1 and d8.p2, etc.
#-------------------------------------------------------------
if (self.d8f.p1_OK):
self.vol_flood[ self.d8f.p1 ] += (dt * self.Qf[self.d8f.w1])
if (self.d8f.p2_OK):
self.vol_flood[ self.d8f.p2 ] += (dt * self.Qf[self.d8f.w2])
if (self.d8f.p3_OK):
self.vol_flood[ self.d8f.p3 ] += (dt * self.Qf[self.d8f.w3])
if (self.d8f.p4_OK):
self.vol_flood[ self.d8f.p4 ] += (dt * self.Qf[self.d8f.w4])
if (self.d8f.p5_OK):
self.vol_flood[ self.d8f.p5 ] += (dt * self.Qf[self.d8f.w5])
if (self.d8f.p6_OK):
self.vol_flood[ self.d8f.p6 ] += (dt * self.Qf[self.d8f.w6])
if (self.d8f.p7_OK):
self.vol_flood[ self.d8f.p7 ] += (dt * self.Qf[self.d8f.w7])
if (self.d8f.p8_OK):
self.vol_flood[ self.d8f.p8 ] += (dt * self.Qf[self.d8f.w8])
#----------------------------------------------------
# Subtract the amount that flows out to D8 neighbor
#----------------------------------------------------
self.vol_flood -= (self.Qf * dt) # (in place)
#--------------------------------------------------------
# The flood water volume must always be nonnegative.
# This also ensures that the flood depth is
# nonnegative. (7/13/06)
#--------------------------------------------------------
np.maximum( self.vol_flood, 0.0, self.vol_flood ) # (in place)
# update_flood_volume()
#-------------------------------------------------------------------
def update_flow_depth_LAST(self):
#-----------------------------------------------------------
# Notes: 7/18/05. Modified to use the equation for volume
# of a trapezoidal channel: vol = Ac * ds, where
# Ac=d*[w + d*tan(t)], and to solve the resulting
# quadratic (discarding neg. root) for new depth, d.
# 8/29/05. Now original ds is used for subsurface
# flow and there is a ds_chan which can include a
# sinuosity greater than 1. This may be especially
# important for larger pixel sizes.
# Removed (ds > 1) here which was only meant to
# avoid a "divide by zero" error at pixels where
# (ds == 0). This isn't necessary since the
# Flow_Lengths function in utils_TF.pro never
# returns a value of zero.
#----------------------------------------------------------
# Modified to avoid double where calls, which
# reduced cProfile run time for this method from
# 1.391 to 0.644. (9/23/14)
#----------------------------------------------------------
# Commented this out on (2/18/10) because it doesn't
# seem to be used anywhere now. Checked all
# of the Channels components.
#----------------------------------------------------------
# self.d_last = self.d.copy()
#-----------------------------------
# Make some local aliases and vars
#-----------------------------------------------------------
# Note: angles were read as degrees & converted to radians
#-----------------------------------------------------------
d = self.d
d_flood = self.d_flood ##### (2019-09-16)
width = self.width ###
angle = self.angle
SCALAR_ANGLES = (np.size(angle) == 1)
#------------------------------------------------------
# (2/18/10) New code to deal with case where the flow
# depth exceeds a bankfull depth.
# For now, d_bankfull is hard-coded.
#
# CHANGE Manning's n here, too?
#------------------------------------------------------
d_bankfull = 4.0 # [meters]
################################
wb = (self.d > d_bankfull) # (array of True or False)
self.width[ wb ] = self.d8.dw[ wb ]
if not(SCALAR_ANGLES):
self.angle[ wb ] = 0.0
# w_overbank = np.where( d > d_bankfull )
# n_overbank = np.size( w_overbank[0] )
# if (n_overbank != 0):
# width[ w_overbank ] = self.d8.dw[ w_overbank ]
# if not(SCALAR_ANGLES): angle[w_overbank] = 0.0
#------------------------------------------------------
# (2/18/10) New code to deal with case where the top
# width exceeds the grid cell width, dw.
#------------------------------------------------------
top_width = width + (2.0 * d * np.tan(self.angle))
wb = (top_width > self.d8.dw) # (array of True or False)
self.width[ wb ] = self.d8.dw[ wb ]
if not(SCALAR_ANGLES):
self.angle[ wb ] = 0.0
# wb = np.where(top_width > self.d8.dw)
# nb = np.size(wb[0])
# if (nb != 0):
# width[ wb ] = self.d8.dw[ wb ]
# if not(SCALAR_ANGLES): angle[ wb ] = 0.0
#----------------------------------
# Is "angle" a scalar or a grid ?
#----------------------------------
if (SCALAR_ANGLES):
if (angle == 0.0):
d = self.vol / (width * self.d8.ds)
else:
denom = 2.0 * np.tan(angle)
arg = 2.0 * denom * self.vol / self.d8.ds
arg += width**(2.0)
d = (np.sqrt(arg) - width) / denom
else:
#-----------------------------------------------------
# Pixels where angle is 0 must be handled separately
#-----------------------------------------------------
w1 = ( angle == 0 ) # (arrays of True or False)
w2 = np.invert( w1 )
#-----------------------------------
A_top = width[w1] * self.d8.ds[w1]
d[w1] = self.vol[w1] / A_top
#-----------------------------------
denom = 2.0 * np.tan(angle[w2])
arg = 2.0 * denom * self.vol[w2] / self.d8.ds[w2]
arg += width[w2]**(2.0)
d[w2] = (np.sqrt(arg) - width[w2]) / denom
#-----------------------------------------------------
# Pixels where angle is 0 must be handled separately
#-----------------------------------------------------
# wz = np.where( angle == 0 )
# nwz = np.size( wz[0] )
# wzc = np.where( angle != 0 )
# nwzc = np.size( wzc[0] )
#
# if (nwz != 0):
# A_top = width[wz] * self.d8.ds[wz]
# ## A_top = self.width[wz] * self.d8.ds_chan[wz]
# d[wz] = self.vol[wz] / A_top
#
# if (nwzc != 0):
# term1 = 2.0 * np.tan(angle[wzc])
# arg = 2.0 * term1 * self.vol[wzc] / self.d8.ds[wzc]
# arg += width[wzc]**(2.0)
# d[wzc] = (np.sqrt(arg) - width[wzc]) / term1
#------------------------------------------
# Set depth values on edges to zero since
# they become spikes (no outflow) 7/15/06
#------------------------------------------
d[ self.d8.noflow_IDs ] = 0.0
#------------------------------------------------
# 4/19/06. Force flow depth to be positive ?
#------------------------------------------------
# This seems to be needed with the non-Richards
# infiltration routines when starting with zero
# depth everywhere, since all water infiltrates
# for some period of time. It also seems to be
# needed more for short rainfall records to
# avoid a negative flow depth error.
#------------------------------------------------
# 7/13/06. Still needed for Richards method
#------------------------------------------------
## self.d = np.maximum(d, 0.0)
np.maximum(d, 0.0, self.d) # (2/19/13, in place)
#-------------------------------------------------
# Find where d <= 0 and save for later (9/23/14)
#-------------------------------------------------
self.d_is_pos = (self.d > 0)
self.d_is_neg = np.invert( self.d_is_pos )
# update_flow_depth_LAST
#-------------------------------------------------------------------
def update_flow_depth(self):
#------------------------------------------------------------
# Notes: 2019-09/16. This function replaces the one above
# now called "update_flow_depth_LAST(). This version
# allows overbank flow and flooding.
#------------------------------------------------------------
# Notes: 7/18/05. Modified to use the equation for volume
# of a trapezoidal channel: vol = Ac * ds, where
# Ac=d*[w + d*tan(t)], and to solve the resulting
# quadratic (discarding neg. root) for new depth, d.
# 8/29/05. Now original ds is used for subsurface
# flow and there is a ds_chan which can include a
# sinuosity greater than 1. This may be especially
# important for larger pixel sizes.
# Removed (ds > 1) here which was only meant to
# avoid a "divide by zero" error at pixels where
# (ds == 0). This isn't necessary since the
# Flow_Lengths function in utils_TF.pro never
# returns a value of zero.
#----------------------------------------------------------
# Modified to avoid double where calls, which
# reduced cProfile run time for this method from
# 1.391 to 0.644. (9/23/14)
#----------------------------------------------------------
# Commented this out on (2/18/10) because it doesn't
# seem to be used anywhere now. Checked all
# of the Channels components.
#----------------------------------------------------------
# self.d_last = self.d.copy()
#-----------------------------------
# Make some local aliases and vars
#-----------------------------------------------------------
# Note: angles were read as degrees & converted to radians
#-----------------------------------------------------------
d = self.d
width = self.width ###
angle = self.angle
SCALAR_ANGLES = (np.size(angle) == 1)
#-----------------------------------------------
# Now compute the water depth in the channels.
#-----------------------------------------------
# Is "angle" a scalar or a grid ?
#----------------------------------
if (SCALAR_ANGLES):
if (angle == 0.0):
d = self.vol / (width * self.d8.ds)
else:
denom = 2.0 * np.tan(angle)
arg = 2.0 * denom * self.vol / self.d8.ds
arg += width**(2.0)
d = (np.sqrt(arg) - width) / denom
# For debugging
# print('angle = ' + str(angle) )
# print('denom.min() = ' + str(denom.min()) )
# print('denom.max() = ' + str(denom.max()) )
# print('ds.min() = ' + str(self.d8.ds.min()) )
# print('ds.max() = ' + str(self.d8.ds.max()) )
# print('arg.min() = ' + str(arg.min()) )
# print('arg.max() = ' + str(arg.max()) )
# d = (np.sqrt(arg) - width) / denom
else:
#-----------------------------------------------------
# Pixels where angle is 0 must be handled separately
#-----------------------------------------------------
w1 = ( angle == 0 ) # (arrays of True or False)
w2 = np.invert( w1 )
#-----------------------------------
A_top = width[w1] * self.d8.ds[w1]
d[w1] = self.vol[w1] / A_top
#-----------------------------------
denom = 2.0 * np.tan(angle[w2])
arg = 2.0 * denom * self.vol[w2] / self.d8.ds[w2]
arg += width[w2]**(2.0)
d[w2] = (np.sqrt(arg) - width[w2]) / denom
#------------------------------------------------------------
# Wherever vol > vol_bankfull, the flow depth just computed
# is wrong and should instead be the bankfull depth.
# Extra water volume has already been put into d_flood.
#------------------------------------------------------------
#### d[ wb1 ] = self.d_bankfull[ wb1 ]
#------------------------------------------
# Set depth values on edges to zero since
# they become spikes (no outflow) 7/15/06
#-----------------------------------------------------------
# NB! This destroys mass, and will have a small effect on
# mass balance calculations. Since flooding now uses the
# free-surface gradient (DEM + d_flood), we should not
# set it to zero at interior noflow_IDs.
#-----------------------------------------------------------
d[ self.d8.noflow_IDs ] = 0.0 # (was needed for Baro)
## d[ self.d8.edge_IDs ] = 0.0
#------------------------------------------------
# 4/19/06. Force flow depth to be positive ?
#------------------------------------------------
# This seems to be needed with the non-Richards
# infiltration routines when starting with zero
# depth everywhere, since all water infiltrates
# for some period of time. It also seems to be
# needed more for short rainfall records to
# avoid a negative flow depth error.
#------------------------------------------------
# 7/13/06. Still needed for Richards method
#------------------------------------------------
## self.d = np.maximum(d, 0.0)
np.maximum(d, 0.0, self.d) # (2/19/13, in place)
#-------------------------------------------------
# Find where d <= 0 and save for later (9/23/14)
#-------------------------------------------------
self.d_is_pos = (self.d > 0)
self.d_is_neg = np.invert( self.d_is_pos )
# update_flow_depth
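#-------------------------------------------------------------------
# Derivation check for the quadratic solved above (illustration
# only).  From  vol = ds * d * [w + d*tan(t)]:
#     tan(t)*d^2 + w*d - (vol/ds) = 0
#     d = [sqrt(w^2 + 4*tan(t)*vol/ds) - w] / (2*tan(t))
# which matches "arg" and "denom" above.  Numeric check:
# w=3, tan(t)=1, vol/ds=4  =>  arg = 16 + 9 = 25, and
# d = (5 - 3)/2 = 1 [m].
#-------------------------------------------------------------------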
#-------------------------------------------------------------------
def update_flood_depth(self):
#-----------------------------------------------------------
# Wherever vol > vol_bankfull, the flow depth computed by
# update_flow_depth() is wrong and should instead be the
# bankfull depth. Extra water volume is put into d_flood.
#-----------------------------------------------------------
# Note: This shouldn't be necessary now.
#-----------------------------------------------------------
# np.minimum(self.d, self.d_bankfull, self.d ) # (in place)
#----------------------------------------------------------
# (2019-09-16) Compute the overbank/flooding depth.
# Channel volume at bankfull is computed in initialize().
#----------------------------------------------------------
# Remember that "width" is the trapezoid bottom width.
# In addition, w_bankfull is not used here, but:
# w_bankfull = width + (2 * d_bankfull * tan(angle))
# width = w_bankfull - (2 * d_bankfull * tan(angle))
# If we know any 3 of these 4 vars, we can compute the
# 4th one. So assume d_bankfull, angle & width are known.
# HOWEVER, values of w_bankfull found by remote sensing
# may be more accurate than values of d_bankfull.
#----------------------------------------------------------
SCALAR_DA = (np.size(self.d8.da) == 1)
d_flood = self.d_flood
vol_flood = self.vol_flood ###################
w1 = (vol_flood > 0) # (array of True or False)
w2 = np.invert( w1 )
if (SCALAR_DA):
d_flood[ w1 ] = vol_flood[ w1 ] / self.d8.da
else:
d_flood[ w1 ] = vol_flood[ w1 ] / self.d8.da[ w1 ]
d_flood[ w2 ] = 0.0
#-------------------------------------------
# Set depth values on edges to zero since
# otherwise they become spikes (no outflow)
#-----------------------------------------------------------
# NB! This destroys mass, and will have a small effect on
# mass balance calculations. Since flooding uses the
# free-surface gradient (DEM + d_flood), we should not
# set it to zero at interior noflow_IDs.
#-----------------------------------------------------------
d_flood[ self.d8.noflow_IDs ] = 0.0
## d_flood[ self.d8.edge_IDs ] = 0.0
self.d_flood[:] = d_flood # write in place
# update_flood_depth()
#-------------------------------------------------------------------
def update_free_surface_slope(self):
#-----------------------------------------------------------
# Notes: It is assumed that the flow directions don't
# change even though the free surface is changing.
#-----------------------------------------------------------
# NB! This only applies to water in the channels, and
# cannot be used when there is overbank flow.
# See "z_free" above instead.
#-----------------------------------------------------------
delta_d = (self.d - self.d[self.d8.parent_IDs])
self.S_free[:] = self.S_bed + (delta_d / self.d8.ds)
#--------------------------------------------
# Don't do this; negative slopes are needed
# to decelerate flow in dynamic wave case
# and for backwater effects.
#--------------------------------------------
# Set negative slopes to zero
#------------------------------
### self.S_free = np.maximum(self.S_free, 0)
# update_free_surface_slope()
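#-------------------------------------------------------------------
# Example (illustration only):  S_bed=0.001, ds=100 [m], and a
# depth of 1.2 [m] at a cell vs. 1.0 [m] at its D8 parent:
#     S_free = 0.001 + (1.2 - 1.0)/100 = 0.003
# A deeper parent cell reduces (or reverses) the slope, which is
# how backwater effects enter the non-kinematic cases.
#-------------------------------------------------------------------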
#-------------------------------------------------------------------
def update_shear_stress(self):
#--------------------------------------------------------
# Notes: 9/9/14. Added so shear stress could be shared.
# This uses the depth-slope product.
#--------------------------------------------------------
if (self.KINEMATIC_WAVE):
slope = self.S_bed
else:
slope = self.S_free
self.tau[:] = self.rho_H2O * self.g * self.d * slope
# update_shear_stress()
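#-------------------------------------------------------------------
# Example (illustration only):  rho_H2O=1000 [kg/m^3],
# g=9.81 [m/s^2], d=1 [m], slope=0.001:
#     tau = 1000 * 9.81 * 1.0 * 0.001 = 9.81 [N/m^2]
#-------------------------------------------------------------------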
#-------------------------------------------------------------------
def update_shear_speed(self):
#--------------------------------------------------------
# Notes: 9/9/14. Added so shear speed could be shared.
#--------------------------------------------------------
self.u_star[:] = np.sqrt( self.tau / self.rho_H2O )
# update_shear_speed()
#-------------------------------------------------------------------
def update_trapezoid_Rh(self):
#-------------------------------------------------------------
# Notes: Compute the hydraulic radius of a trapezoid that:
# (1) has a bed width of wb >= 0 (0 for triangular)
# (2) has a bank angle of theta (0 for rectangular)
# (3) is filled with water to a depth of d.
# The units of wb and d are meters. The units of
# theta are assumed to be degrees and are converted.
#-------------------------------------------------------------
# NB! wb should never be zero, so P_wet can never be 0,
# which would produce a NaN (divide by zero).
#-------------------------------------------------------------
# See Notes for TF_Tan function in utils_TF.pro
# AW = d * (wb + (d * TF_Tan(theta_rad)) )
#-------------------------------------------------------------
# 9/9/14. Bug fix. Angles were already in radians but
# were converted to radians again.
#--------------------------------------------------------------
#---------------------------------------------------------
# Compute hydraulic radius grid for trapezoidal channels
#-----------------------------------------------------------
# Note: angles were read as degrees & converted to radians
#-----------------------------------------------------------
d = self.d # (local synonyms)
wb = self.width # (trapezoid bottom width)
L2 = d * np.tan( self.angle )
A_wet = d * (wb + L2)
P_wet = wb + (np.float64(2) * d / np.cos(self.angle) )
#---------------------------------------------------
# At noflow_IDs (e.g. edges) P_wet may be zero
# so do this to avoid "divide by zero". (10/29/11)
#---------------------------------------------------
P_wet[ self.d8.noflow_IDs ] = np.float64(1)
Rh = (A_wet / P_wet)
#--------------------------------
# w = np.where(P_wet == 0)
# print 'In update_trapezoid_Rh():'
# print ' P_wet= 0 at', w[0].size, 'cells'
#---------------------------------------------------
# Override Rh for overland flow, where d_flood > 0
# (2019-09-18)
#---------------------------------------------------
# w1 = (self.d_flood > 0) # (array of True or False)
# Rh[ w1 ] = self.d_flood[ w1 ] #########################################
#------------------------------------
# Force edge pixels to have Rh = 0.
# This will make u = 0 there also.
#------------------------------------
Rh[ self.d8.noflow_IDs ] = np.float64(0)
## w = np.where(wb <= 0)
## nw = np.size(w[0])
## if (nw > 0): Rh[w] = np.float64(0)
self.Rh[:] = Rh
self.A_wet[:] = A_wet ## (Now shared: 9/9/14)
self.P_wet[:] = P_wet ## (Now shared: 9/9/14)
#---------------
# For testing
#--------------
## print 'dmin, dmax =', d.min(), d.max()
## print 'wmin, wmax =', wb.min(), wb.max()
## print 'amin, amax =', self.angle.min(), self.angle.max()
# update_trapezoid_Rh()
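    #-------------------------------------------------------------------
    # Example (illustrative sketch): hydraulic radius of a trapezoid
    # with bottom width wb = 3 m, bank angle 45 degrees and depth
    # d = 1 m (values made up):
    #
    #   import numpy as np
    #   d = 1.0;  wb = 3.0;  theta = np.radians( 45.0 )
    #   A_wet = d * (wb + d * np.tan( theta ))    # = 4.0    [m^2]
    #   P_wet = wb + 2 * d / np.cos( theta )      # ~ 5.8284 [m]
    #   Rh    = A_wet / P_wet                     # ~ 0.6863 [m]
    #-------------------------------------------------------------------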
#-------------------------------------------------------------------
def update_friction_factor(self):
#----------------------------------------
# Note: Added on 9/9/14 to streamline.
#----------------------------------------------------------
# Note: f = half of the Fanning friction factor
# d = flow depth [m]
# z0 = roughness length
# S = bed slope (assumed equal to friction slope)
# g = 9.81 = gravitation constant [m/s^2]
#---------------------------------------------------------
# For law of the wall:
# kappa = 0.41 = von Karman's constant
# aval = 0.48 = integration constant
# law_const = sqrt(g)/kappa = 7.6393d
# smoothness = (aval / z0) * d
# f = (kappa / alog(smoothness))^2d
# tau_bed = rho_w * f * u^2 = rho_w * g * d * S
# d, S, and z0 can be arrays.
# To make default z0 correspond to default
# Manning's n, can use this approximation:
# z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d
# For n=0.03, this gives: z0 = 0.011417
#########################################################
# However, for n=0.3, it gives: z0 = 11417.413
# which is 11.4 km! So the approximation only
# holds within some range of values.
#--------------------------------------------------------
###############################################################
# cProfile: This method took: 0.369 secs for topoflow_test()
###############################################################
#--------------------------------------
# Find where (d <= 0). g=good, b=bad
#--------------------------------------
wg = self.d_is_pos
wb = self.d_is_neg
# wg = ( self.d > 0 )
# wb = np.invert( wg )
#-----------------------------
# Compute f for Manning case
#-----------------------------------------
# This makes f=0 and du=0 where (d <= 0)
#-----------------------------------------
if (self.MANNING):
n2 = self.nval ** np.float64(2)
self.f[ wg ] = self.g * (n2[wg] / (self.d[wg] ** self.one_third))
self.f[ wb ] = np.float64(0)
#---------------------------------
# Compute f for Law of Wall case
#---------------------------------
if (self.LAW_OF_WALL):
#------------------------------------------------
# Make sure (smoothness > 1) before taking log.
# Should issue a warning if this is used.
#------------------------------------------------
smoothness = (self.aval / self.z0val) * self.d
np.maximum(smoothness, np.float64(1.1), smoothness) # (in place)
self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2)
self.f[wb] = np.float64(0)
##############################################################
# cProfile: This method took: 0.93 secs for topoflow_test()
##############################################################
# #--------------------------------------
# # Find where (d <= 0). g=good, b=bad
# #--------------------------------------
# wg = np.where( self.d > 0 )
# ng = np.size( wg[0])
# wb = np.where( self.d <= 0 )
# nb = np.size( wb[0] )
#
# #-----------------------------
# # Compute f for Manning case
# #-----------------------------------------
# # This makes f=0 and du=0 where (d <= 0)
# #-----------------------------------------
# if (self.MANNING):
# n2 = self.nval ** np.float64(2)
# if (ng != 0):
# self.f[wg] = self.g * (n2[wg] / (self.d[wg] ** self.one_third))
# if (nb != 0):
# self.f[wb] = np.float64(0)
#
# #---------------------------------
# # Compute f for Law of Wall case
# #---------------------------------
# if (self.LAW_OF_WALL):
# #------------------------------------------------
# # Make sure (smoothness > 1) before taking log.
# # Should issue a warning if this is used.
# #------------------------------------------------
# smoothness = (self.aval / self.z0val) * self.d
# np.maximum(smoothness, np.float64(1.1), smoothness) # (in place)
# ## smoothness = np.maximum(smoothness, np.float64(1.1))
# if (ng != 0):
# self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2)
# if (nb != 0):
# self.f[wb] = np.float64(0)
#---------------------------------------------
# We could share the Fanning friction factor
#---------------------------------------------
### self.fanning = (np.float64(2) * self.f)
# update_friction_factor()
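    #-------------------------------------------------------------------
    # Example (illustrative sketch): the two forms of f above, with
    # made-up values d = 1 m, n = 0.03, z0 = 0.011417, aval = 0.48
    # and kappa = 0.41:
    #
    #   f_manning  = 9.81 * (0.03 ** 2) / (1.0 ** (1.0 / 3))  # ~ 0.008829
    #   smoothness = (0.48 / 0.011417) * 1.0                  # ~ 42.04
    #   f_wall     = (0.41 / np.log( smoothness )) ** 2       # ~ 0.01203
    #-------------------------------------------------------------------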
#-------------------------------------------------------------------
def update_velocity(self):
#---------------------------------------------------------
# Note: Do nothing now unless this method is overridden
# by a particular method of computing velocity.
#---------------------------------------------------------
print("Warning: update_velocity() method is inactive.")
# print 'KINEMATIC WAVE =', self.KINEMATIC_WAVE
# print 'DIFFUSIVE WAVE =', self.DIFFUSIVE_WAVE
# print 'DYNAMIC WAVE =', self.DYNAMIC_WAVE
# update_velocity()
#-------------------------------------------------------------------
def update_velocity_on_edges(self):
#---------------------------------
# Force edge pixels to have u=0.
#----------------------------------------
# Large slope around 1 flows into small
# slope & leads to a negative velocity.
#------------------------------------------------------
# Whenever flow direction is undefined (i.e. noflow),
# the velocity should be zero. Not just on edges.
#------------------------------------------------------
self.u[ self.d8.noflow_IDs ] = np.float64(0)
### self.u[ self.d8.edge_IDs ] = np.float64(0)
# update_velocity_on_edges()
#-------------------------------------------------------------------
def update_froude_number(self):
#----------------------------------------------------------
# Notes: 9/9/14. Added so Froude number could be shared.
# This use of wg & wb reduced cProfile time from:
# 0.644 sec to: 0.121. (9/23/14)
#----------------------------------------------------------
# g = good, b = bad
#--------------------
wg = self.d_is_pos
wb = self.d_is_neg
self.froude[ wg ] = self.u[wg] / np.sqrt( self.g * self.d[wg] )
self.froude[ wb ] = np.float64(0)
# update_froude_number()
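    #-------------------------------------------------------------------
    # Example (illustrative sketch): Froude number for u = 1 m/s in
    # d = 0.5 m of water (made-up values):
    #
    #   froude = 1.0 / np.sqrt( 9.81 * 0.5 )   # ~ 0.4515  (subcritical)
    #-------------------------------------------------------------------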
#-------------------------------------------------------------
def update_outlet_values(self):
#-------------------------------------------------
# Save computed values at outlet, which are used
# by the TopoFlow driver.
#-----------------------------------------------------
        # Note that Q_outlet, etc. are defined as 0D numpy
        # arrays to make them "mutable scalars" (i.e. this
        # allows changes to be seen by other components that
        # hold a reference).  To preserve the reference,
        # however, we must use fill() to assign a new value.
#-----------------------------------------------------
Q_outlet = self.Q[ self.outlet_ID ]
u_outlet = self.u[ self.outlet_ID ]
d_outlet = self.d[ self.outlet_ID ]
f_outlet = self.f[ self.outlet_ID ]
self.Q_outlet.fill( Q_outlet )
self.u_outlet.fill( u_outlet )
self.d_outlet.fill( d_outlet )
self.f_outlet.fill( f_outlet )
## self.Q_outlet.fill( self.Q[ self.outlet_ID ] )
## self.u_outlet.fill( self.u[ self.outlet_ID ] )
## self.d_outlet.fill( self.d[ self.outlet_ID ] )
## self.f_outlet.fill( self.f[ self.outlet_ID ] )
## self.Q_outlet = self.Q[ self.outlet_ID ]
## self.u_outlet = self.u[ self.outlet_ID ]
## self.d_outlet = self.d[ self.outlet_ID ]
## self.f_outlet = self.f[ self.outlet_ID ]
## self.Q_outlet = self.Q.flat[self.outlet_ID]
## self.u_outlet = self.u.flat[self.outlet_ID]
## self.d_outlet = self.d.flat[self.outlet_ID]
## self.f_outlet = self.f.flat[self.outlet_ID]
# update_outlet_values()
#-------------------------------------------------------------
def update_peak_values(self):
#-------------------------------------------
# Using "fill" saves new values "in-place"
# and preserves "mutable scalars".
#-------------------------------------------
if (self.Q_outlet > self.Q_peak):
self.Q_peak.fill( self.Q_outlet )
self.T_peak.fill( self.time_min ) # (time to peak)
#---------------------------------------
if (self.u_outlet > self.u_peak):
self.u_peak.fill( self.u_outlet )
self.Tu_peak.fill( self.time_min )
#---------------------------------------
if (self.d_outlet > self.d_peak):
self.d_peak.fill( self.d_outlet )
self.Td_peak.fill( self.time_min )
## if (self.Q_outlet > self.Q_peak):
## self.Q_peak = self.Q_outlet
## self.T_peak = self.time_min # (time to peak)
## #-----------------------------------
## if (self.u_outlet > self.u_peak):
## self.u_peak = self.u_outlet
## self.Tu_peak = self.time_min
## #-----------------------------------
## if (self.d_outlet > self.d_peak):
## self.d_peak = self.d_outlet
## self.Td_peak = self.time_min
# update_peak_values()
#-------------------------------------------------------------
def update_Q_out_integral(self):
#--------------------------------------------------------
# Note: Renamed "volume_out" to "vol_Q" for consistency
# with vol_P, vol_SM, vol_IN, vol_ET, etc. (5/18/12)
#--------------------------------------------------------
self.vol_Q += (self.Q_outlet * self.dt) ## 5/19/12.
## self.vol_Q += (self.Q[self.outlet_ID] * self.dt)
# update_Q_out_integral()
#-------------------------------------------------------------
def update_mins_and_maxes(self, REPORT=False):
#-------------------------------------------------------
# Note: Only call this at the end, not from update().
#-------------------------------------------------------
#--------------------------------------
# Get mins and max over entire domain
#--------------------------------------
## Q_min = self.Q.min()
## Q_max = self.Q.max()
## #---------------------
## u_min = self.u.min()
## u_max = self.u.max()
## #---------------------
## d_min = self.d.min()
## d_max = self.d.max()
#--------------------------------------------
# Exclude edges where mins are always zero.
#--------------------------------------------
nx = self.nx
ny = self.ny
Q_min = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].min()
Q_max = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].max()
#-------------------------------------------------
u_min = self.u[1:(ny - 2)+1,1:(nx - 2)+1].min()
u_max = self.u[1:(ny - 2)+1,1:(nx - 2)+1].max()
#-------------------------------------------------
d_min = self.d[1:(ny - 2)+1,1:(nx - 2)+1].min()
d_max = self.d[1:(ny - 2)+1,1:(nx - 2)+1].max()
#-------------------------------------------------
# (2/6/13) This preserves "mutable scalars" that
# can be accessed as refs by other components.
#-------------------------------------------------
if (Q_min < self.Q_min):
self.Q_min.fill( Q_min )
if (Q_max > self.Q_max):
self.Q_max.fill( Q_max )
#------------------------------
if (u_min < self.u_min):
self.u_min.fill( u_min )
if (u_max > self.u_max):
self.u_max.fill( u_max )
#------------------------------
if (d_min < self.d_min):
self.d_min.fill( d_min )
if (d_max > self.d_max):
self.d_max.fill( d_max )
#-------------------------------------------------
# (2/6/13) This preserves "mutable scalars" that
# can be accessed as refs by other components.
#-------------------------------------------------
## self.Q_min.fill( np.minimum( self.Q_min, Q_min ) )
## self.Q_max.fill( np.maximum( self.Q_max, Q_max ) )
## #---------------------------------------------------
## self.u_min.fill( np.minimum( self.u_min, u_min ) )
## self.u_max.fill( np.maximum( self.u_max, u_max ) )
## #---------------------------------------------------
## self.d_min.fill( np.minimum( self.d_min, d_min ) )
## self.d_max.fill( np.maximum( self.d_max, d_max ) )
#-------------------------------------------------
# (2/6/13) This preserves "mutable scalars" that
# can be accessed as refs by other components.
#-------------------------------------------------
## self.Q_min.fill( min( self.Q_min, Q_min ) )
## self.Q_max.fill( max( self.Q_max, Q_max ) )
## #---------------------------------------------------
## self.u_min.fill( min( self.u_min, u_min ) )
## self.u_max.fill( max( self.u_max, u_max ) )
## #---------------------------------------------------
## self.d_min.fill( min( self.d_min, d_min ) )
## self.d_max.fill( max( self.d_max, d_max ) )
#----------------------------------------------
# (2/6/13) This produces "immutable scalars".
#----------------------------------------------
## self.Q_min = self.Q.min()
## self.Q_max = self.Q.max()
## self.u_min = self.u.min()
## self.u_max = self.u.max()
## self.d_min = self.d.min()
## self.d_max = self.d.max()
if (REPORT):
print('In channels_base.update_mins_and_maxes():')
print('(dmin, dmax) =', self.d_min, self.d_max)
print('(umin, umax) =', self.u_min, self.u_max)
print('(Qmin, Qmax) =', self.Q_min, self.Q_max)
print(' ')
# update_mins_and_maxes()
#-------------------------------------------------------------
def update_total_channel_water_volume(self, REPORT=False):
#----------------------------------------------------
# Note: Compute the total volume of water in all
# channels for the entire DEM. Can use this
# in the final mass balance reporting.
# (2019-09-17)
#----------------------------------------------------
# Note: This should be called from finalize().
#----------------------------------------------------
vol = self.vol
vol[ self.d8.noflow_IDs ] = 0.0
## vol[ self.d8.edge_IDs ] = 0.0
vol_chan = np.sum( vol )
self.vol_chan.fill( vol_chan )
#-------------------------------------
# Exclude values on edges of the DEM?
#-------------------------------------
# nx = self.nx
# ny = self.ny
# vol = self.vol[1:(ny - 2)+1,1:(nx - 2)+1].min()
# update_total_channel_water_volume()
#-------------------------------------------------------------
def update_total_land_water_volume(self, REPORT=False):
#----------------------------------------------------
# Note: Compute the total volume of land water in
# all grid cells for the entire DEM. Use
# this in the final mass balance reporting.
# (2019-09-17)
#----------------------------------------------------
#-------------------------------------
# Exclude values on edges of the DEM?
#-------------------------------------
# nx = self.nx
# ny = self.ny
# d_flood = self.d_flood[1:(ny - 2)+1,1:(nx - 2)+1].min()
d_flood = self.d_flood
vol_land = np.sum( d_flood * self.da )
self.vol_land.fill( vol_land )
# update_total_land_water_volume()
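    #-------------------------------------------------------------------
    # Example (illustrative sketch): total land (overbank) water
    # volume as flood depth times cell area, summed (made-up values):
    #
    #   d_flood  = np.array([ 0.0, 0.5, 0.5 ])      # [m]
    #   da       = np.array([ 100., 100., 400. ])   # [m^2]
    #   vol_land = np.sum( d_flood * da )           # = 250.0 [m^3]
    #-------------------------------------------------------------------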
#-------------------------------------------------------------------
def check_flow_depth_LAST(self):
OK = True
d = self.d
dt = self.dt
nx = self.nx #################
#---------------------------------
        # Are all flow depths positive ?
#---------------------------------
wbad = np.where( np.logical_or( d < 0.0, np.logical_not(np.isfinite(d)) ))
nbad = np.size( wbad[0] )
if (nbad == 0):
return OK
OK = False
dmin = d[wbad].min()
star_line = '*******************************************'
msg = [ star_line, \
'ERROR: Simulation aborted.', ' ', \
'Negative or NaN depth found: ' + str(dmin), \
'Time step may be too large.', \
'Time step: ' + str(dt) + ' [s]' ]
for k in range(len(msg)):
print(msg[k])
#-------------------------------------------
# If not too many, print actual velocities
#-------------------------------------------
if (nbad < 30):
brow = wbad[0][0]
bcol = wbad[1][0]
## badi = wbad[0]
## bcol = (badi % nx)
## brow = (badi / nx)
crstr = str(bcol) + ', ' + str(brow)
msg = [' ', '(Column, Row): ' + crstr, \
'Flow depth: ' + str(d[brow, bcol])]
for k in range(len(msg)):
print(msg[k])
print(star_line)
print(' ')
raise RuntimeError('Negative depth found.') # (11/16/16)
return OK
# check_flow_depth_LAST()
#-------------------------------------------------------------------
def check_flow_depth(self):
OK = True
d = self.d
dt = self.dt
nx = self.nx #################
#---------------------------------
# Are any flow depths negative ?
#---------------------------------
wneg = np.where( d < 0.0 )
nneg = np.size( wneg[0] )
#-----------------------------
# Are any flow depths NaNs ?
#-----------------------------
wnan = np.where( np.isnan(d) )
nnan = np.size( wnan[0] )
#-----------------------------
# Are any flow depths Infs ?
#-----------------------------
winf = np.where( np.isinf(d) )
ninf = np.size( winf[0] )
#----------------------------------
# Option to allow NaN but not Inf
#----------------------------------
if (nneg == 0) and (ninf == 0):
return OK
OK = False
#--------------------------------------------------
# if (nneg == 0) and (nnan == 0) and (ninf == 0):
# return OK
# OK = False
#----------------------------------
# Print informative error message
#----------------------------------
star_line = '*******************************************'
print( star_line )
print('ERROR: Simulation aborted.')
print(' ')
#--------------------------------------------------------
if (nneg > 0):
dmin = d[ wneg ].min()
str1 = 'Found ' + str(nneg) + ' negative depths.'
str2 = ' Smallest negative depth = ' + str(dmin)
print( str1 )
print( str2 )
#--------------------------------------------------------
if (nnan > 0):
str3 = 'Found ' + str(nnan) + ' NaN depths.'
print( str3 )
#--------------------------------------------------------
if (ninf > 0):
str4 = 'Found ' + str(ninf) + ' infinite depths.'
print( str4 )
#------------------------------------
# Option to allow NaNs on the edges
#------------------------------------
print( 'Time step may be too large for stability.' )
print( 'Time step: ' + str(dt) + ' [s]' )
print( 'Try reducing timestep in channels CFG file.' )
print( star_line )
print( ' ' )
#-------------------------------------------
# If not too many, print actual depths
#-------------------------------------------
# if (nbad < 30):
# brow = wbad[0][0]
# bcol = wbad[1][0]
# ## badi = wbad[0]
# ## bcol = (badi % nx)
# ## brow = (badi / nx)
# crstr = str(bcol) + ', ' + str(brow)
#
# msg = [' ', '(Column, Row): ' + crstr, \
# 'Flow depth: ' + str(d[brow, bcol])]
# for k in range(len(msg)):
# print(msg[k])
# print(star_line)
# print(' ')
raise RuntimeError('Negative or NaN depth found.') # (11/16/16)
return OK
# check_flow_depth()
#-------------------------------------------------------------------
def check_flow_velocity_LAST(self):
OK = True
u = self.u
dt = self.dt
nx = self.nx
#--------------------------------
# Are all velocities positive ?
#--------------------------------
wbad = np.where( np.logical_or( u < 0.0, np.logical_not(np.isfinite(u)) ))
nbad = np.size( wbad[0] )
if (nbad == 0):
return OK
OK = False
umin = u[wbad].min()
star_line = '*******************************************'
msg = [ star_line, \
'ERROR: Simulation aborted.', ' ', \
'Negative or NaN velocity found: ' + str(umin), \
'Time step may be too large.', \
'Time step: ' + str(dt) + ' [s]']
for k in range(len(msg)):
print(msg[k])
#-------------------------------------------
# If not too many, print actual velocities
#-------------------------------------------
if (nbad < 30):
brow = wbad[0][0]
bcol = wbad[1][0]
## badi = wbad[0]
## bcol = (badi % nx)
## brow = (badi / nx)
crstr = str(bcol) + ', ' + str(brow)
msg = [' ', '(Column, Row): ' + crstr, \
'Velocity: ' + str(u[brow, bcol])]
for k in range(len(msg)):
print(msg[k])
print(star_line)
print(' ')
raise RuntimeError('Negative or NaN velocity found.') # (11/16/16)
return OK
## umin = u[wbad].min()
## badi = wbad[0]
## bcol = (badi % nx)
## brow = (badi / nx)
## crstr = str(bcol) + ', ' + str(brow)
## msg = np.array([' ', \
## '*******************************************', \
## 'ERROR: Simulation aborted.', ' ', \
## 'Negative velocity found: ' + str(umin), \
## 'Time step may be too large.', ' ', \
## '(Column, Row): ' + crstr, \
## 'Velocity: ' + str(u[badi]), \
## 'Time step: ' + str(dt) + ' [s]', \
## '*******************************************', ' '])
## for k in xrange( np.size(msg) ):
## print msg[k]
## return OK
# check_flow_velocity_LAST()
#-------------------------------------------------------------------
def check_flow_velocity(self):
OK = True
u = self.u
dt = self.dt
nx = self.nx
#---------------------------------
        # Are any velocities negative ?
#---------------------------------
wneg = np.where( u < 0.0 )
nneg = np.size( wneg[0] )
#-----------------------------
        # Are any velocities NaNs ?
#-----------------------------
wnan = np.where( np.isnan(u) )
nnan = np.size( wnan[0] )
#-----------------------------
        # Are any velocities Infs ?
#-----------------------------
winf = np.where( np.isinf(u) )
ninf = np.size( winf[0] )
#----------------------------------
# Option to allow NaN but not Inf
#----------------------------------
if (nneg == 0) and (ninf == 0):
return OK
OK = False
#--------------------------------------------------
# if (nneg == 0) and (nnan == 0) and (ninf == 0):
# return OK
# OK = False
#----------------------------------
# Print informative error message
#----------------------------------
star_line = '*******************************************'
print( star_line )
print('ERROR: Simulation aborted.')
print(' ')
#--------------------------------------------------------
if (nneg > 0):
umin = u[ wneg ].min()
str1 = 'Found ' + str(nneg) + ' negative velocities.'
str2 = ' Smallest negative velocity = ' + str(umin)
print( str1 )
print( str2 )
#--------------------------------------------------------
if (nnan > 0):
str3 = 'Found ' + str(nnan) + ' NaN velocities.'
print( str3 )
#--------------------------------------------------------
if (ninf > 0):
str4 = 'Found ' + str(ninf) + ' infinite velocities.'
print( str4 )
#------------------------------------
# Option to allow NaNs on the edges
#------------------------------------
print( 'Time step may be too large for stability.' )
print( 'Time step: ' + str(dt) + ' [s]' )
print( 'Try reducing timestep in channels CFG file.' )
print( star_line )
print( ' ' )
raise RuntimeError('Negative or NaN velocity found.') # (11/16/16)
return OK
## umin = u[wbad].min()
## badi = wbad[0]
## bcol = (badi % nx)
## brow = (badi / nx)
## crstr = str(bcol) + ', ' + str(brow)
## msg = np.array([' ', \
## '*******************************************', \
## 'ERROR: Simulation aborted.', ' ', \
## 'Negative velocity found: ' + str(umin), \
## 'Time step may be too large.', ' ', \
## '(Column, Row): ' + crstr, \
## 'Velocity: ' + str(u[badi]), \
## 'Time step: ' + str(dt) + ' [s]', \
## '*******************************************', ' '])
## for k in xrange( np.size(msg) ):
## print msg[k]
## return OK
# check_flow_velocity()
#-------------------------------------------------------------------
def open_input_files(self):
#------------------------------------------------------
# This method uses prepend_directory() in BMI_base.py
# which uses both eval and exec.
#------------------------------------------------------
# in_files = ['slope_file', 'nval_file', 'z0val_file',
# 'width_file', 'angle_file', 'sinu_file',
# 'd0_file', 'd_bankfull_file' ]
# self.prepend_directory( in_files, INPUT=True )
#------------------------------------------------------
# This avoids eval/exec, but is brute-force
# 2020-05-03. Changed in_directory to topo_directory.
# See set_directories() in BMI_base.py.
#------------------------------------------------------
self.slope_file = (self.topo_directory + self.slope_file)
self.nval_file = (self.topo_directory + self.nval_file)
self.z0val_file = (self.topo_directory + self.z0val_file)
self.width_file = (self.topo_directory + self.width_file)
self.angle_file = (self.topo_directory + self.angle_file)
self.sinu_file = (self.topo_directory + self.sinu_file)
self.d0_file = (self.topo_directory + self.d0_file)
self.d_bankfull_file = (self.topo_directory + self.d_bankfull_file)
#----------------------------------------------
# Open all input files and store file objects
#----------------------------------------------
#self.code_unit = model_input.open_file(self.code_type, self.code_file)
self.slope_unit = model_input.open_file(self.slope_type, self.slope_file)
if (self.MANNING):
self.nval_unit = model_input.open_file(self.nval_type, self.nval_file)
if (self.LAW_OF_WALL):
self.z0val_unit = model_input.open_file(self.z0val_type, self.z0val_file)
self.width_unit = model_input.open_file(self.width_type, self.width_file)
self.angle_unit = model_input.open_file(self.angle_type, self.angle_file)
self.sinu_unit = model_input.open_file(self.sinu_type, self.sinu_file)
self.d0_unit = model_input.open_file(self.d0_type, self.d0_file)
self.d_bankfull_unit = model_input.open_file(self.d_bankfull_type, self.d_bankfull_file)
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
#-------------------------------------------------------
# Note: All grids are assumed to have same dimensions
# as the DEM.
#-------------------------------------------------------
rti = self.rti
#-------------------------------------------------------
# All grids are assumed to have a data type of Float32
# as stored in their binary grid file.
#-------------------------------------------------------
# If EOF is reached, model_input.read_next() does not
# change the value of the scalar or grid.
#-------------------------------------------------------
slope = model_input.read_next(self.slope_unit, self.slope_type, rti)
if (slope is not None):
self.update_var( 'slope', slope )
if (self.MANNING):
nval = model_input.read_next(self.nval_unit, self.nval_type, rti)
if (nval is not None):
self.update_var( 'nval', nval )
if (self.LAW_OF_WALL):
z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti)
if (z0val is not None):
self.update_var( 'z0val', z0val )
width = model_input.read_next(self.width_unit, self.width_type, rti)
if (width is not None):
#-------------------------------------------------------
# Width can be zero on 4 edges, but this can result in
# a "divide by zero" error later on, so need to adjust.
#-------------------------------------------------------
w1 = ( width == 0 ) # (arrays of True or False)
width[w1] = self.d8.dw[w1]
self.update_var( 'width', width )
angle = model_input.read_next(self.angle_unit, self.angle_type, rti)
if (angle is not None):
#------------------------------------------------------------
# Convert bank angles from degrees to radians. For a
# SCALAR angle, this is done in initialize_computed_vars().
            # To support the general case, this is done here for an
            # angle GRID.
#------------------------------------------------------------
angle *= self.deg_to_rad # [radians]
self.update_var( 'angle', angle )
sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti)
if (sinu is not None):
self.update_var( 'sinu', sinu )
d0 = model_input.read_next(self.d0_unit, self.d0_type, rti)
if (d0 is not None):
self.update_var( 'd0', d0 )
# (2019-09-16) ##############################
d_bankfull = model_input.read_next(self.d_bankfull_unit, self.d_bankfull_type, rti)
if (d_bankfull is not None):
self.update_var( 'd_bankfull', d_bankfull )
# read_input_files()
#-------------------------------------------------------------------
# def read_input_files_last(self):
#
# #----------------------------------------------------
# # The D8 flow codes are always a grid, size of DEM.
# #----------------------------------------------------
# # NB! model_input.py also has a read_grid() function.
# #----------------------------------------------------
# rti = self.rti
# ## print 'Reading D8 flow grid (in CHANNELS)...'
# ## self.code = rtg_files.read_grid(self.code_file, rti,
# ## RTG_type='BYTE')
# ## print ' '
#
# #-------------------------------------------------------
# # All grids are assumed to have a data type of Float32.
# #-------------------------------------------------------
# slope = model_input.read_next(self.slope_unit, self.slope_type, rti)
# if (slope is not None):
# self.slope = slope
# ## print ' min(slope) =', slope.min()
# ## print ' max(slope) =', slope.max()
#
# # If EOF was reached, hopefully numpy's "fromfile"
# # returns None, so that the stored value will be
# # the last value that was read.
#
# if (self.MANNING):
# nval = model_input.read_next(self.nval_unit, self.nval_type, rti)
# if (nval is not None):
# # if (self.nval_type.lower() == 'scalar'):
# # self.update_scalar( 'nval', nval )
# # else:
# # self.nval = nval
# self.nval = nval
# self.nval_min = nval.min()
# self.nval_max = nval.max()
# print ' min(nval) =', self.nval_min
# print ' max(nval) =', self.nval_max
#
# if (self.LAW_OF_WALL):
# z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti)
# if (z0val is not None):
# self.z0val = z0val
# self.z0val_min = z0val.min()
# self.z0val_max = z0val.max()
# print ' min(z0val) =', self.z0val_min
# print ' max(z0val) =', self.z0val_max
#
# width = model_input.read_next(self.width_unit, self.width_type, rti)
# if (width is not None):
# #-------------------------------------------------------
# # Width can be zero on 4 edges, but this can result in
# # a "divide by zero" error later on, so need to adjust.
# #-------------------------------------------------------
# w1 = ( width == 0 ) # (arrays of True or False)
# width[w1] = self.d8.dw[w1]
# self.width = width
# print ' min(width) =', width.min()
# print ' max(width) =', width.max()
#
# angle = model_input.read_next(self.angle_unit, self.angle_type, rti)
# if (angle is not None):
# print ' min(angle) =', angle.min(), ' [deg]'
# print ' max(angle) =', angle.max(), ' [deg]'
# #-----------------------------------------------
# # Convert bank angles from degrees to radians.
# #-----------------------------------------------
# self.angle = angle * self.deg_to_rad # [radians]
# ### self.angle = angle # (before 9/9/14)
#
# sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti)
# if (sinu is not None):
# self.sinu = sinu
# print ' min(sinuosity) =', sinu.min()
# print ' max(sinuosity) =', sinu.max()
#
# d0 = model_input.read_next(self.d0_unit, self.d0_type, rti)
# if (d0 is not None):
# self.d0 = d0
# print ' min(d0) =', d0.min()
# print ' max(d0) =', d0.max()
#
# ## code = model_input.read_grid(self.code_unit, \
# ## self.code_type, rti, dtype='UInt8')
# ## if (code is not None): self.code = code
#
# # read_input_files_last()
#-------------------------------------------------------------------
def close_input_files(self):
# if not(self.slope_unit.closed):
# if (self.slope_unit is not None):
#-------------------------------------------------
# NB! self.code_unit was never defined as read.
#-------------------------------------------------
# if (self.code_type != 'scalar'): self.code_unit.close()
if (self.slope_type != 'Scalar'): self.slope_unit.close()
if (self.MANNING):
if (self.nval_type != 'Scalar'): self.nval_unit.close()
if (self.LAW_OF_WALL):
if (self.z0val_type != 'Scalar'): self.z0val_unit.close()
if (self.width_type != 'Scalar'): self.width_unit.close()
if (self.angle_type != 'Scalar'): self.angle_unit.close()
if (self.sinu_type != 'Scalar'): self.sinu_unit.close()
if (self.d0_type != 'Scalar'): self.d0_unit.close()
if (self.d_bankfull_type != 'Scalar'): self.d_bankfull_unit.close()
## if (self.slope_file != ''): self.slope_unit.close()
## if (self.MANNING):
## if (self.nval_file != ''): self.nval_unit.close()
## if (self.LAW_OF_WALL):
## if (self.z0val_file != ''): self.z0val_unit.close()
## if (self.width_file != ''): self.width_unit.close()
## if (self.angle_file != ''): self.angle_unit.close()
## if (self.sinu_file != ''): self.sinu_unit.close()
## if (self.d0_file != ''): self.d0_unit.close()
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.Q_gs_file = (self.out_directory + self.Q_gs_file)
self.u_gs_file = (self.out_directory + self.u_gs_file)
self.d_gs_file = (self.out_directory + self.d_gs_file)
self.f_gs_file = (self.out_directory + self.f_gs_file)
self.d_flood_gs_file = (self.out_directory + self.d_flood_gs_file)
#--------------------------------------------------------
self.Q_ts_file = (self.out_directory + self.Q_ts_file)
self.u_ts_file = (self.out_directory + self.u_ts_file)
self.d_ts_file = (self.out_directory + self.d_ts_file)
self.f_ts_file = (self.out_directory + self.f_ts_file)
self.d_flood_ts_file = (self.out_directory + self.d_flood_ts_file)
# update_outfile_names()
#-------------------------------------------------------------------
def bundle_output_files(self):
###################################################
# NOT READY YET. Need "get_long_name()" and a new
# version of "get_var_units". (9/21/14)
###################################################
#-------------------------------------------------------------
# Bundle the output file info into an array for convenience.
# Then we just need one open_output_files(), in BMI_base.py,
# and one close_output_files(). Less to maintain. (9/21/14)
#-------------------------------------------------------------
# gs = grid stack, ts = time series, ps = profile series.
#-------------------------------------------------------------
        self.out_files = [
           {'var_name':'Q',
            'save_gs':self.SAVE_Q_GRIDS, 'gs_file':self.Q_gs_file,
            'save_ts':self.SAVE_Q_PIXELS, 'ts_file':self.Q_ts_file,
            'long_name':get_long_name('Q'), 'units_name':get_var_units('Q')},
           #-----------------------------------------------------------------
           {'var_name':'u',
            'save_gs':self.SAVE_U_GRIDS, 'gs_file':self.u_gs_file,
            'save_ts':self.SAVE_U_PIXELS, 'ts_file':self.u_ts_file,
            'long_name':get_long_name('u'), 'units_name':get_var_units('u')},
           #-----------------------------------------------------------------
           {'var_name':'d',
            'save_gs':self.SAVE_D_GRIDS, 'gs_file':self.d_gs_file,
            'save_ts':self.SAVE_D_PIXELS, 'ts_file':self.d_ts_file,
            'long_name':get_long_name('d'), 'units_name':get_var_units('d')},
           #-----------------------------------------------------------------
           {'var_name':'f',
            'save_gs':self.SAVE_F_GRIDS, 'gs_file':self.f_gs_file,
            'save_ts':self.SAVE_F_PIXELS, 'ts_file':self.f_ts_file,
            'long_name':get_long_name('f'), 'units_name':get_var_units('f')},
           #-----------------------------------------------------------------
           {'var_name':'d_flood',
            'save_gs':self.SAVE_DF_GRIDS, 'gs_file':self.d_flood_gs_file,
            'save_ts':self.SAVE_DF_PIXELS, 'ts_file':self.d_flood_ts_file,
            'long_name':get_long_name('d_flood'), 'units_name':get_var_units('d_flood')} ]
# bundle_output_files
#-------------------------------------------------------------------
def disable_all_output(self):
self.SAVE_Q_GRIDS = False
self.SAVE_U_GRIDS = False
self.SAVE_D_GRIDS = False
self.SAVE_F_GRIDS = False
self.SAVE_DF_GRIDS = False
#----------------------------
self.SAVE_Q_PIXELS = False
self.SAVE_U_PIXELS = False
self.SAVE_D_PIXELS = False
self.SAVE_F_PIXELS = False
self.SAVE_DF_PIXELS = False
# disable_all_output()
#-------------------------------------------------------------------
def open_output_files(self):
model_output.check_netcdf()
self.update_outfile_names()
## self.bundle_output_files()
## print 'self.SAVE_Q_GRIDS =', self.SAVE_Q_GRIDS
## print 'self.SAVE_U_GRIDS =', self.SAVE_U_GRIDS
## print 'self.SAVE_D_GRIDS =', self.SAVE_D_GRIDS
## print 'self.SAVE_F_GRIDS =', self.SAVE_F_GRIDS
## #---------------------------------------------------
## print 'self.SAVE_Q_PIXELS =', self.SAVE_Q_PIXELS
## print 'self.SAVE_U_PIXELS =', self.SAVE_U_PIXELS
## print 'self.SAVE_D_PIXELS =', self.SAVE_D_PIXELS
## print 'self.SAVE_F_PIXELS =', self.SAVE_F_PIXELS
# IDs = self.outlet_IDs
# for k in xrange( len(self.out_files) ):
# #--------------------------------------
# # Open new files to write grid stacks
# #--------------------------------------
# if (self.out_files[k].save_gs):
# model_output.open_new_gs_file( self, self.out_files[k], self.rti )
# #--------------------------------------
# # Open new files to write time series
# #--------------------------------------
# if (self.out_files[k].save_ts):
# model_output.open_new_ts_file( self, self.out_files[k], IDs )
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
if (self.SAVE_Q_GRIDS):
model_output.open_new_gs_file( self, self.Q_gs_file, self.rti,
var_name='Q',
long_name='volumetric_discharge',
units_name='m^3/s')
if (self.SAVE_U_GRIDS):
model_output.open_new_gs_file( self, self.u_gs_file, self.rti,
var_name='u',
long_name='mean_channel_flow_velocity',
units_name='m/s')
if (self.SAVE_D_GRIDS):
model_output.open_new_gs_file( self, self.d_gs_file, self.rti,
var_name='d',
long_name='max_channel_flow_depth',
units_name='m')
if (self.SAVE_F_GRIDS):
model_output.open_new_gs_file( self, self.f_gs_file, self.rti,
var_name='f',
long_name='friction_factor',
units_name='none')
if (self.SAVE_DF_GRIDS):
model_output.open_new_gs_file( self, self.d_flood_gs_file, self.rti,
var_name='d_flood',
long_name='land_surface_water__depth',
units_name='m')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_Q_PIXELS):
model_output.open_new_ts_file( self, self.Q_ts_file, IDs,
var_name='Q',
long_name='volumetric_discharge',
units_name='m^3/s')
if (self.SAVE_U_PIXELS):
model_output.open_new_ts_file( self, self.u_ts_file, IDs,
var_name='u',
long_name='mean_channel_flow_velocity',
units_name='m/s')
if (self.SAVE_D_PIXELS):
model_output.open_new_ts_file( self, self.d_ts_file, IDs,
var_name='d',
long_name='max_channel_flow_depth',
units_name='m')
if (self.SAVE_F_PIXELS):
model_output.open_new_ts_file( self, self.f_ts_file, IDs,
var_name='f',
long_name='friction_factor',
units_name='none')
if (self.SAVE_DF_PIXELS):
model_output.open_new_ts_file( self, self.d_flood_ts_file, IDs,
var_name='d_flood',
long_name='land_surface_water__depth',
units_name='m')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
#---------------------------------------------------------
# Notes: This function was written to use only model
# time (maybe from a caller) in seconds, and
# the save_grid_dt and save_pixels_dt parameters
# read by read_cfg_file().
#
# read_cfg_file() makes sure that all of
# the "save_dts" are larger than or equal to the
# process dt.
#---------------------------------------------------------
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
## if ((self.time_index % self.grid_save_step) == 0):
## self.save_grids()
## if ((self.time_index % self.pixel_save_step) == 0):
## self.save_pixel_values()
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
if (self.SAVE_Q_GRIDS): model_output.close_gs_file( self, 'Q')
if (self.SAVE_U_GRIDS): model_output.close_gs_file( self, 'u')
if (self.SAVE_D_GRIDS): model_output.close_gs_file( self, 'd')
if (self.SAVE_F_GRIDS): model_output.close_gs_file( self, 'f')
if (self.SAVE_DF_GRIDS): model_output.close_gs_file( self, 'd_flood')
#---------------------------------------------------------------
if (self.SAVE_Q_PIXELS): model_output.close_ts_file( self, 'Q')
if (self.SAVE_U_PIXELS): model_output.close_ts_file( self, 'u')
if (self.SAVE_D_PIXELS): model_output.close_ts_file( self, 'd')
if (self.SAVE_F_PIXELS): model_output.close_ts_file( self, 'f')
if (self.SAVE_DF_PIXELS): model_output.close_ts_file( self, 'd_flood')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
#-----------------------------------
# Save grid stack to a netCDF file
#---------------------------------------------
# Note that add_grid() methods will convert
# var from scalar to grid now, if necessary.
#---------------------------------------------
if (self.SAVE_Q_GRIDS):
model_output.add_grid( self, self.Q, 'Q', self.time_min )
if (self.SAVE_U_GRIDS):
model_output.add_grid( self, self.u, 'u', self.time_min )
if (self.SAVE_D_GRIDS):
model_output.add_grid( self, self.d, 'd', self.time_min )
if (self.SAVE_F_GRIDS):
model_output.add_grid( self, self.f, 'f', self.time_min )
if (self.SAVE_DF_GRIDS):
model_output.add_grid( self, self.d_flood, 'd_flood', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self): ##### save_time_series_data(self) #######
IDs = self.outlet_IDs
time = self.time_min #####
#-------------
# New method
#-------------
if (self.SAVE_Q_PIXELS):
model_output.add_values_at_IDs( self, time, self.Q, 'Q', IDs )
if (self.SAVE_U_PIXELS):
model_output.add_values_at_IDs( self, time, self.u, 'u', IDs )
if (self.SAVE_D_PIXELS):
model_output.add_values_at_IDs( self, time, self.d, 'd', IDs )
if (self.SAVE_F_PIXELS):
model_output.add_values_at_IDs( self, time, self.f, 'f', IDs )
if (self.SAVE_DF_PIXELS):
model_output.add_values_at_IDs( self, time, self.d_flood, 'd_flood', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
def manning_formula(self):
#---------------------------------------------------------
# Notes: R = (A/P) = hydraulic radius [m]
# N = Manning's roughness coefficient
# (usually in the range 0.012 to 0.035)
# S = bed slope or free slope
# R,S, and N may be 2D arrays.
# If length units are all *feet*, then an extra
# factor of 1.49 must be applied. If units are
# meters, no such factor is needed.
# Note that Q = Ac * u, where Ac is cross-section
# area. For a trapezoid, Ac does not equal w*d.
#---------------------------------------------------------
if (self.KINEMATIC_WAVE):
S = self.S_bed
else:
S = self.S_free
u = (self.Rh ** self.two_thirds) * np.sqrt(S) / self.nval
#--------------------------------------------------------
# Add a hydraulic jump option for when u gets too big ?
#--------------------------------------------------------
return u
# manning_formula()
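    #-------------------------------------------------------------------
    # Example (illustrative sketch): Manning velocity for Rh = 0.6863 m
    # (see the trapezoid example above), S = 0.001 and n = 0.03; all
    # values made up:
    #
    #   u = (0.6863 ** (2.0 / 3)) * np.sqrt( 0.001 ) / 0.03   # ~ 0.82 [m/s]
    #-------------------------------------------------------------------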
#-------------------------------------------------------------------
def law_of_the_wall(self):
#---------------------------------------------------------
# Notes: u = flow velocity [m/s]
# d = flow depth [m]
# z0 = roughness length
# S = bed slope or free slope
# g = 9.81 = gravitation constant [m/s^2]
# kappa = 0.41 = von Karman's constant
# aval = 0.48 = integration constant
# law_const = sqrt(g)/kappa = 7.6393d
# smoothness = (aval / z0) * d
# f = (kappa / alog(smoothness))^2d
# tau_bed = rho_w * f * u^2 = rho_w * g * d * S
# d, S, and z0 can be arrays.
# To make default z0 correspond to default
# Manning's n, can use this approximation:
# z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d
# For n=0.03, this gives: z0 = 0.011417
#########################################################
# However, for n=0.3, it gives: z0 = 11417.413
# which is 11.4 km! So the approximation only
# holds within some range of values.
#--------------------------------------------------------
if (self.KINEMATIC_WAVE):
S = self.S_bed
else:
S = self.S_free
smoothness = (self.aval / self.z0val) * self.d
#------------------------------------------------
# Make sure (smoothness > 1) before taking log.
# Should issue a warning if this is used.
#------------------------------------------------
smoothness = np.maximum(smoothness, np.float64(1.1))
u = self.law_const * np.sqrt(self.Rh * S) * np.log(smoothness)
#--------------------------------------------------------
# Add a hydraulic jump option for when u gets too big ?
#--------------------------------------------------------
return u
# law_of_the_wall()
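    #-------------------------------------------------------------------
    # Example (illustrative sketch): law-of-the-wall velocity with
    # made-up values Rh = 0.6863 m, S = 0.001, d = 1 m, z0 = 0.011417,
    # aval = 0.48 and law_const = sqrt(9.81)/0.41 ~ 7.6393:
    #
    #   smoothness = (0.48 / 0.011417) * 1.0                     # ~ 42.04
    #   u = 7.6393 * np.sqrt( 0.6863 * 0.001 ) * np.log( 42.04 ) # ~ 0.748 [m/s]
    #-------------------------------------------------------------------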
#-------------------------------------------------------------------
def print_status_report(self):
#----------------------------------------------------
# Wherever depth is less than z0, assume that water
# is not flowing and set u and Q to zero.
        # However, we also need (d > 0) to avoid a divide
# by zero problem, even when numerators are zero.
#----------------------------------------------------
        # Note: z0val and aval are assumed to be available here;
        #       both can be scalars or grids (see LAW_OF_WALL).
        #----------------------------------------------------
        d = self.d
        FLOWING = (d > (self.z0val / self.aval))
        FLOWING[ self.d8.noflow_IDs ] = False
        wflow   = np.where( FLOWING )
n_flow = np.size( wflow[0] )
n_pixels = self.rti.n_pixels
percent = np.float64(100.0) * (np.float64(n_flow) / n_pixels)
fstr = ('%5.1f' % percent) + '%'
# fstr = idl_func.string(percent, format='(F5.1)').strip() + '%'
print(' Percentage of pixels with flow = ' + fstr)
print(' ')
self.update_mins_and_maxes(REPORT=True)
wmax = np.where(self.Q == self.Q_max)
nwmax = np.size(wmax[0])
print(' Max(Q) occurs at: ' + str( wmax[0] ))
#print,' Max attained at ', nwmax, ' pixels.'
print(' ')
print('-------------------------------------------------')
# print_status_report()
#-------------------------------------------------------------------
# def remove_bad_slopes0(self, FLOAT=False):
#
# #------------------------------------------------------------
# # Notes: The main purpose of this routine is to find
# # pixels that have nonpositive slopes and replace
# # then with the smallest value that occurs anywhere
# # in the input slope grid. For example, pixels on
# # the edges of the DEM will have a slope of zero.
#
# # With the Kinematic Wave option, flow cannot leave
# # a pixel that has a slope of zero and the depth
# # increases in an unrealistic manner to create a
# # spike in the depth grid.
#
# # It would be better, of course, if there were
# # no zero-slope pixels in the DEM. We could use
# # an "Imposed gradient DEM" to get slopes or some
# # method of "profile smoothing".
#
# # It is possible for the flow code to be nonzero
# # at a pixel that has NaN for its slope. For these
# # pixels, we also set the slope to our min value.
#
# # 7/18/05. Broke this out into separate procedure.
# #------------------------------------------------------------
#
# #-----------------------------------
# # Are there any "bad" pixels ?
# # If not, return with no messages.
# #-----------------------------------
# wb = np.where(np.logical_or((self.slope <= 0.0), \
# np.logical_not(np.isfinite(self.slope))))
# nbad = np.size(wb[0])
# print('size(slope) = ' + str(np.size(self.slope)) )
# print('size(wb) = ' + str(nbad) )
#
# wg = np.where(np.invert(np.logical_or((self.slope <= 0.0), \
# np.logical_not(np.isfinite(self.slope)))))
# ngood = np.size(wg[0])
# if (nbad == 0) or (ngood == 0):
# return
#
# #---------------------------------------------
# # Find smallest positive value in slope grid
# # and replace the "bad" values with smin.
# #---------------------------------------------
# print('-------------------------------------------------')
# print('WARNING: Zero or negative slopes found.')
# print(' Replacing them with smallest slope.')
# print(' Use "Profile smoothing tool" instead.')
# S_min = self.slope[wg].min()
# S_max = self.slope[wg].max()
# print(' min(S) = ' + str(S_min))
# print(' max(S) = ' + str(S_max))
# print('-------------------------------------------------')
# print(' ')
# self.slope[wb] = S_min
#
# #--------------------------------
# # Convert data type to double ?
# #--------------------------------
# if (FLOAT):
# self.slope = np.float32(self.slope)
# else:
# self.slope = np.float64(self.slope)
#
# # remove_bad_slopes0()
#-------------------------------------------------------------------
def remove_bad_slopes(self, FLOAT=False):
#------------------------------------------------------------
# Notes: The main purpose of this routine is to find
# pixels that have nonpositive slopes and replace
# then with the smallest value that occurs anywhere
# in the input slope grid. For example, pixels on
# the edges of the DEM will have a slope of zero.
# With the Kinematic Wave option, flow cannot leave
# a pixel that has a slope of zero and the depth
# increases in an unrealistic manner to create a
# spike in the depth grid.
# It would be better, of course, if there were
# no zero-slope pixels in the DEM. We could use
# an "Imposed gradient DEM" to get slopes or some
# method of "profile smoothing".
# It is possible for the flow code to be nonzero
# at a pixel that has NaN for its slope. For these
# pixels, we also set the slope to our min value.
# 7/18/05. Broke this out into separate procedure.
#------------------------------------------------------------
#------------------------
# Are any slopes Nans ?
#------------------------
wnan = np.where( np.isnan( self.slope ) )
nnan = np.size( wnan[0] )
#-------------------------------
# Are any slopes nonpositive ?
#-------------------------------
wneg = np.where( self.slope <= 0.0 )
nneg = np.size( wneg[0] )
#-------------------------------
# Are any slopes infinite ?
#-------------------------------
winf = np.where( np.isinf( self.slope ) )
ninf = np.size( winf[0] )
#----------------------------
nbad = (nnan + nneg + ninf)
if (nbad == 0):
return
#---------------------------
# Merge "wheres" into wbad
#---------------------------
S_shape = self.slope.shape
bad = np.zeros( S_shape, dtype='bool' )
if (nnan > 0): bad[ wnan ] = True
if (nneg > 0): bad[ wneg ] = True
if (ninf > 0): bad[ winf ] = True
good = np.invert( bad )
#--------------------
# Print information
#--------------------
print('Total number of slope values = ' + str(np.size(self.slope)) )
print('Number of nonpositive values = ' + str(nneg) )
print('Number of NaN values = ' + str(nnan) )
print('Number of infinite values = ' + str(ninf) )
#---------------------------------------------
# Find smallest positive value in slope grid
# and replace the "bad" values with smin.
#---------------------------------------------
print('-------------------------------------------------')
print('WARNING: Zero, negative or NaN slopes found.')
print(' Replacing them with smallest slope.')
print(' Use "new_slopes.py" instead.')
S_min = self.slope[ good ].min()
S_max = self.slope[ good ].max()
print(' min(S) = ' + str(S_min))
print(' max(S) = ' + str(S_max))
print('-------------------------------------------------')
print(' ')
self.slope[ bad ] = S_min
#--------------------------------
# Convert data type to double ?
#--------------------------------
if (FLOAT):
self.slope = np.float32(self.slope)
else:
            self.slope = np.float64(self.slope)

    #   remove_bad_slopes()
    #-------------------------------------------------------------------