#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Date: Aug 8, 2020
"""
# import necessary packages
import requests
import time
import numpy as np
import sys
import re
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import xml
import pandas as pd
def read_url(url: str, driver_path: str):
"""
Read the website and return the contents of the website
:param url: The url of the website
:param driver_path: The path of the Google Chrome Driver
:return soup.text: The contents of the website
"""
start_time = time.time()
option = webdriver.ChromeOptions()
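# NOTE: when several user-agent arguments are added, Chrome only honors the
# last one, so the first entry below is effectively overridden by the second.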
option.add_argument(
'user-agent="MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) '
'AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"')
option.add_argument(
'user-agent="Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) '
'Version/9.0 Mobile/13B143 Safari/601.1"')
option.add_argument('--disable-infobars')
option.add_argument('--incognito')
option.add_argument('headless')
option.add_argument("--start-maximized")
option.add_argument('blink-settings=imagesEnabled=false')
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities["pageLoadStrategy"] = "none"
prefs = {
"profile.managed_default_content_settings.images": 2,
'profile.default_content_settings.popups': 0
}
option.add_experimental_option("prefs", prefs)
# option.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
# PROXY = "proxy_host:proxy:port"
# desired_capabilities = option.to_capabilities()
# desired_capabilities['proxy'] = {
# "httpProxy": PROXY,
# "ftpProxy": PROXY,
# "sslProxy": PROXY,
# "noProxy": None,
# "proxyType": "MANUAL",
# "class": "org.openqa.selenium.Proxy",
# "autodetect": False
# }
# If you don't have Google Chrome Driver installed, uncomment this line
# driver_path = ChromeDriverManager().install()
driver = webdriver.Chrome(driver_path, chrome_options=option, desired_capabilities=desired_capabilities)
# driver.implicitly_wait(0.1)
driver.get(url)
contents = driver.page_source
START = contents.find('serverContent')
END = contents.find('QRcodebox')
contents_cut = contents[START:END]
end_time = time.time()
print('Time used to get the website was %.7f s' % (end_time - start_time))
return contents, contents_cut, driver
def find_English_term(content: str):
"""
Find the English Term from the contents
:param content: The contents of the website
:return Eng_term: The found English term
:return content: The contents that cut the English term part
"""
mark = content.find('detail_content')
temp_cont = content[mark-100:mark]
START = temp_cont.find('">')
END = temp_cont.find('</a></h3>')
Eng_term = temp_cont[START+2:END]
content = content[mark+len('detail_content'):]
return Eng_term, content
def find_Chinese_term(content: str):
"""
Find the Chinese Term from the contents
:param content: The contents of the website
:return Chi_term: The found Chinese Term
:return content: The contents that cut the Chinese term part
"""
if '中文名称' not in content:
Chi_term = ''
else:
mark = content.find('target')
temp_cont = content[mark:mark+100]
START = temp_cont.find('target')
END = temp_cont.find('</a>')
Chi_term = temp_cont[START+len('target="_blank">'):END]
chi_loc = content.find(Chi_term)
content = content[chi_loc+len(Chi_term):]
return Chi_term, content
def find_English_definition(content: str):
"""
Find the English Definition from the content
:param content: The contents of the website
:return Eng_def: The found English definition
:return content: The contents that cut the English definition part
"""
if '释义' not in content:
Eng_def = ''
else:
START = content.find('释义')
END = content.find('</i>')
Eng_def = content[START+len('释义:<span><i>'):END]
content = content[END+len('</i></span></div>'):]
return Eng_def, content
def synonym(content: str):
"""
Find all the Synonym words w.r.t. the English term
:param content: The contents of the website
:return synonym_words: The found synonym words
"""
if '同义词' not in content:
synonym_words = ''
else:
START = content.find('同义词')
END = content.find('范畴')
main_content = content[START:END]
key_word = 'target'
synonym_words = []
cur_content = main_content
while key_word in cur_content:
start = cur_content.find('target') + len('target')
ite_content = cur_content[start:start+100]
new_start = ite_content.find(">")
end = ite_content.find('</a></span>')
synonym_word = ite_content[new_start+1:end]
synonym_words.append(synonym_word)
cur_content = cur_content[start+1:]
synonym_words = np.array(synonym_words)
synonym_words = np.squeeze(synonym_words)
synonym_words = str(synonym_words).replace('[', '')
synonym_words = [str(synonym_words).replace(']', '')]
content = content[END:]
return synonym_words, content
def field(content: str):
"""
Find and save all the Fields of this particular term
:param content: The contents of the website
:return content: The Fields contents
"""
if '范畴' not in content:
field = ''
else:
content = content.replace('title=""', '')  # assign the result: str.replace does not modify in place
START = content.find('target') + len('target')
content = content[START:]
field = []
new_content = content
while 'title' in new_content:
start = new_content.find('title=') + len('title=')
end = new_content.find('><span')
temp_field = new_content[start+1:end-1]
if temp_field != '':
field.append(temp_field)
new_content = new_content[start:]
field = np.array(field)
field = np.squeeze(field)
field = str(field).replace('[', '')
field = [str(field).replace(']', '')]
return field
# The main function
if __name__ == "__main__":
index = 1
English_terms = []
Chinese_terms = []
English_definition = []
Synonym_words = []
Fields_summary = []
start = '0'
end = '100w'  # '100w' is Chinese shorthand for 100万, i.e. one million (matches end_index below)
save_file = start + '-' + end
start_index = int(0)
end_index = int(1000000)
for i in range(start_index, end_index):
i = str(i).zfill(6)  # zero-pad the index to six digits
url = 'https://www.nstl.gov.cn/stkos_detail.html?id=C019' + i
driver_path = '/Users/shuyuej/.wdm/drivers/chromedriver/mac64/84.0.4147.30/chromedriver'
save_path = 'NSTD_data/'
contents, contents_cut, driver = read_url(url=url, driver_path=driver_path)
if '暂无相关资源' in contents:
print('There is no data on this webpage! Skipping and continuing......')
continue
else:
Eng_term, con_cut_eng = find_English_term(content=contents_cut)
English_terms.append([Eng_term])
Chi_term, con_cut_chi = find_Chinese_term(content=con_cut_eng)
Chinese_terms.append([Chi_term])
Eng_def, con_cut_def = find_English_definition(content=con_cut_chi)
English_definition.append([Eng_def])
synonym_word, synonym_cut_con = synonym(content=con_cut_def)
Synonym_words.append([synonym_word])
fields = field(content=synonym_cut_con)
Fields_summary.append([fields])
index += 1
print('Website ' + str(i) + ': data saved, continuing......')
rows = np.shape(English_terms)[0]
English_terms = np.reshape(English_terms, [rows, 1])
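# (source truncated here; presumably the remaining result lists were reshaped
# the same way and written out, e.g. with the otherwise-unused pandas import
# above: pd.DataFrame(...).to_csv(save_path + save_file + '.csv'))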
##############################
#
# IMPORTS
#
##############################
# Misc
import os
from matplotlib import pyplot as plt
from IPython.display import clear_output
import sys
import h5py
import numpy as np
import pickle
# NN
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Activation
from keras import backend as K
from sklearn.model_selection import train_test_split
# Tensorboard
import time
from tensorflow.python.keras.callbacks import TensorBoard
# Weight Checkpoints
from keras.callbacks import ModelCheckpoint
# Move directories
import shutil
# debugging
import ipdb
# ipdb.set_trace()
# Print progress
from decimal import Decimal
##############################
#
# Plot Losses Callback
#
##############################
class PlotLosses(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
# Reshape input vector to fit on graph
def reshapeVector(vec):
l = len(vec)
L = epochs - l
if L>=0:
tail = np.ones((L), dtype = int) * vec[-1]
vec = np.hstack((vec,tail))
return vec
# Load data to compare with
if compareResultsDuringTraining:
self.compareData = load_obj('Results/' + compareWith, 'fitHistory')
self.compAcc = reshapeVector(self.compareData['acc'])
self.compValAcc = reshapeVector(self.compareData['val_acc'])
self.compLoss = reshapeVector(self.compareData['loss'])
self.compValLoss = reshapeVector(self.compareData['val_loss'])
self.i = 0
self.x = []
self.loss = []
self.val_loss = []
self.acc = []
self.val_acc = []
self.fig = plt.figure()
self.logs = {'acc':[], 'val_acc':[], 'loss':[], 'val_loss':[]}
self.saveDir = 'Results/' + str(resID) + '/fitTemp/'
def on_epoch_end(self, epoch, logs={}):
self.x.append(self.i)
self.loss.append(logs['loss'])
self.val_loss.append(logs['val_loss'])
self.acc.append(logs['acc'])
self.val_acc.append(logs['val_acc'])
self.logs = {'acc':self.acc, 'val_acc':self.val_acc, 'loss':self.loss, 'val_loss':self.val_loss}
self.i += 1
clear_output(wait=True)
# Create plots
f = plt.figure(figsize=(15,7))
ax = f.add_subplot(121)
ax2 = f.add_subplot(122)
# Plot Loss
ax.plot(self.x, self.loss, color='blue', label="Train", linewidth = 1)
ax.plot(self.x, self.val_loss, color='deepskyblue', label="Validation", linewidth = 1)
if compareResultsDuringTraining:
ax.plot(self.x, self.compLoss[:len(self.loss)], color='black', label=compareWith + " Training", linewidth = 1)
ax.plot(self.x, self.compValLoss[:len(self.loss)], color='gray', label=compareWith + " Validation", linewidth = 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.legend()
ax.set_ylim(bottom=0)
ax.grid(True)
# Plot Accuracy
ax2.plot(self.x, self.acc, 'b-', label="Train", linewidth = 1)
ax2.plot(self.x, self.val_acc, color = 'deepskyblue', label="Validation", linewidth = 1)
if compareResultsDuringTraining:
ax2.plot(self.x, self.compAcc[:len(self.acc)], color='black', label=compareWith + " Training", linewidth = 1)
ax2.plot(self.x, self.compValAcc[:len(self.acc)], color='silver', label=compareWith + " Validation", linewidth = 1)
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Accuracy')
ax2.legend()
ax2.set_ylim(top=1)
ax2.grid(True)
# Show and save plot
# plt.tight_layout()
plt.savefig(self.saveDir + 'currentAccAndLoss')
plt.show();
# print results
print("Train Accuracy of last epoch: ", logs['acc'])
print("Validation Accuracy of last epoch: ", logs['val_acc'])
print("Train Loss of last epoch: ", logs['loss'])
print("Validation Loss of last epoch: ", logs['val_loss'])
# # Plot Loss
# plt.subplot(1,2,1)
# plt.figure(figsize=(8,8))
# plt.plot(self.x, self.loss, 'b-', label="Train", linewidth = 1)
# plt.plot(self.x, self.val_loss, 'r-', label="Validation", linewidth = 1)
# plt.plot(self.x, self.compLoss[:len(self.loss)], 'b--', label=compareWith + " Training")
# plt.plot(self.x, self.compValLoss[:len(self.loss)], 'r--', label=compareWith + " Validation")
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.legend()
# plt.ylim(bottom=0)
# plt.grid(True)
# # plt.savefig('fitTemp/currentLoss')
# # plt.show();
# # Plot Accuracy
# plt.subplot(1,2,2)
# plt.figure(figsize=(8,8))
# plt.plot(self.x, self.acc, 'b-', label="Train", linewidth = 1)
# plt.plot(self.x, self.val_acc, 'r-', label="Validation", linewidth = 1)
# plt.plot(self.x, self.compAcc[:len(self.acc)], 'b--', label=compareWith + " Training")
# plt.plot(self.x, self.compValAcc[:len(self.acc)], 'r--', label=compareWith + " Validation")
# plt.xlabel('Epochs')
# plt.ylabel('Accuracty')
# plt.legend()
# plt.ylim(top=1)
# plt.grid(True)
# # Show and save plot
# # plt.tight_layout()
# # plt.savefig('fitTemp/currentAccAndLoss')
# plt.show();
with open(self.saveDir + 'logs.txt','w') as file:
file.write(str(self.logs))
with open(self.saveDir + 'atEpochNr.txt','w') as file:
file.write(str(epoch))
plot_losses = PlotLosses()
##############################
#
# Misc Functions
#
##############################
def calcScore(model):
print("Calculating score")
score = model.evaluate(X_test, y_test, verbose=1)
print(X_train.shape)
print('Evaluated test loss:', score[0])
print('Evaluated test accuracy:', score[1])
return score
def calcScoreBigData(model):
print("Calculating score")
score = np.array([.0,.0])
t0 = time.time() # start time
t1 = t0 # last print
i1 = i2 = 0
for X_train, X_test, y_train, y_test, percDone, loadLength in genData(test_size = 0, yieldSize = yieldSize):
score += np.array(model.evaluate(X_train, y_train, verbose=0))
t2 = time.time()
tSinceLastPrint = t2 - t1
i2 += 1
if tSinceLastPrint > tPrintInterval:
printProgress(t0, t1, t2, i1, i2, loadLength//yieldSize)
t1 = time.time()
i1 = i2
score = score/i2  # average over the number of evaluated batches
print()
print('Evaluated loss:', score[0])
print('Evaluated accuracy:', score[1])
return score
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
def askAbortIfPathExists(fileName):
if askForConfirmation:
if os.path.exists(fileName):
a = input("Error, file/directory {} exists, continue? [y/n]".format(fileName))
if a[0] != "y" and a[0] != "Y":
sys.exit()
def createDir(dir, confirm = True):
if os.path.exists(dir):
askAbortIfPathExists(dir)
else:
os.makedirs(dir)
def save_obj(saveDir, saveName, obj ):
if not os.path.exists(saveDir):
os.makedirs(saveDir)
fileName = saveDir + '/'+ saveName + '.pkl'
askAbortIfPathExists(fileName)
with open(fileName, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(dir, fileName ):
with open(dir + '/' + fileName + '.pkl', 'rb') as f:
return pickle.load(f)
def sq2hnit(sq):
col = sq%8
row = (sq - col)//8
return col,row
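# Quick sanity check of the square-to-coordinate mapping (assumes squares are
# numbered 0..63 in row-major order, so square 10 sits at column 2, row 1):
assert sq2hnit(0) == (0, 0)
assert sq2hnit(10) == (2, 1)
assert sq2hnit(63) == (7, 7)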
# 0: pawns
# 1: kings
def vecSt2fullSt(vecSt, nPi, nPa, nWPa):
fullSt = np.zeros((4,8,8), dtype = 'bool')
for i in range(nPi - 2):
sq = vecSt[i]
col,row = sq2hnit(sq)
if i < nWPa:
fullSt[0][row][col] = True
else:
fullSt[1][row][col] = True
col,row = sq2hnit(vecSt[-2])
fullSt[2][row][col] = True
col,row = sq2hnit(vecSt[-1])
fullSt[3][row][col] = True
return fullSt
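# Illustrative sketch of the 4-plane encoding (assumptions: vecSt lists pawn
# squares first with the first nWPa entries white, and the two kings last;
# note that nPa is not used by the function itself):
# st = vecSt2fullSt(np.array([12, 52, 4, 60]), nPi=4, nPa=2, nWPa=1)
# st[0][1][4]  -> True, white pawn on square 12 (row 1, col 4)
# st[1][6][4]  -> True, black pawn on square 52 (row 6, col 4)
# st[2][0][4], st[3][7][4] -> the white and black king planes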
def vecSt2fullSt_8x8x2(vecSt, nPi, nPa, nWPa):
fullSt = np.zeros((8,8,2), dtype = 'int8')
for i in range(nPi - 2):
sq = vecSt[i]
col,row = sq2hnit(sq)
if i < nWPa:
fullSt[row][col][0] = 1
else:
fullSt[row][col][0] = -1
col,row = sq2hnit(vecSt[-2])
fullSt[row][col][1] = 1
col,row = sq2hnit(vecSt[-1])
fullSt[row][col][1] = -1
return fullSt
# count nr of each score instance
# wdlCounter placeholders: [-2, -1, 0, 1 ,2]
def wdlCountingMachine(ds):
wdlCounter = [0,0,0,0,0]
l = len(ds)
i = 0
intv = l//100
for wdl in ds:
i += 1
if i%intv == 0:
sys.stdout.write(str((i*100)//l) + " percent done")
sys.stdout.write('\r')
sys.stdout.flush()
wdlCounter[wdl[0] + 2] += 1
print(wdlCounter)
return wdlCounter
# wdlCountingMachine(d3t)
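# A vectorized alternative sketch using numpy (assumes ds fits in memory as
# an (N, 1) array of scores in {-2, -1, 0, 1, 2}):
# wdlCounter = np.bincount(np.asarray(ds)[:, 0] + 2, minlength=5)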
##############################
#
# Gen DATA
#
##############################
def genData(randomState = 42, test_size = 0.33, yieldSize = 1000):
with h5py.File(fileName, 'r') as f:
d = f[dataSetName]
dt = f[dataSetWdlName]
l = len(d)
loadLength = int(l * fractionOfDataToUse)
if convertStates:
sys.exit("loadDataGenerator can't convert states, aborting.")
for i in range(0,loadLength, yieldSize):
if i + yieldSize > loadLength:
ys = loadLength - i
else:
ys = yieldSize
X = d[i: i+ys]
y = dt[i: i+ys]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=randomState)
del X, y
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Percentage done
percDone = round(100*i/loadLength, 3)
yield X_train, X_test, y_train, y_test, percDone, loadLength
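# Example usage sketch (assumes the module-level configuration names used
# above -- fileName, dataSetName, dataSetWdlName, fractionOfDataToUse,
# convertStates, num_classes -- are already defined):
# for X_train, X_test, y_train, y_test, percDone, loadLength in genData(
#         test_size=0, yieldSize=1000):
#     model.train_on_batch(X_train, y_train)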
##############################
#
# LOAD DATA
#
##############################
# load datasets
def loadData(randomState = 42, test_size = 0.33):
with h5py.File(fileName, 'r') as f:
d = f[dataSetName]
dt = f[dataSetWdlName]
l = len(d)
loadLength = int(l * fractionOfDataToUse)
if convertStates:
X = np.array([vecSt2fullSt(vecSt,nPi, nPa, nWPa) for vecSt in d[:loadLength]])
else:
X = d[:loadLength]
y = dt[:loadLength]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=randomState)
del X
del y
print('X_train shape:', X_train.shape)
print('y_train shape:', y_train.shape)
print('X_test shape:', X_test.shape)
print('y_test shape:', y_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print("Done loading dataset")
return X_train, X_test, y_train, y_test
##############################
#
# CREATE MODEL
#
##############################
def createModel():
# import keras.backend as K
# K.set_floatx('float16')
# K.set_epsilon(1e-4) #default is 1e-7
# K.set_floatx('float32')
# K.set_epsilon(1e-7) #default is 1e-7
model = Sequential()
nnStr = ''
for i in range(len(filters)):
s = str(filterShape[i])
filter = str(filters[i])
nnStr += s + 'x' + filter + '-'
nnStr = nnStr[:-1]
assert (len(filters) == len(filterShape)),"Error, len(filters) != len(filterShape)"
if useBatchNorm:
for i in range(len(filters)):
if i == 0:
model.add(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padding='valid',
data_format = "channels_first",
use_bias = False,
# kernel_initializer =
input_shape=input_shape))
else:
model.add(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
use_bias = False,
padding='valid'))
model.add(BatchNormalization())
model.add(Activation("relu"))
else:
for i in range(len(filters)):
if i == 0:
model.add(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padding='valid',
activation='relu',
data_format = "channels_first",
# kernel_initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0, seed=None),
input_shape=input_shape))
else:
model.add(Conv2D(filters[i], kernel_size=(filterShape[i], filterShape[i]),
padding='valid',
# kernel_initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0, seed=None),
activation='relu'))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
if multiGPU:
model = keras.utils.multi_gpu_model(model, gpus=2)
model.summary()
if loadWeights:
if loadCheckpointWeights:
if weightsCheckpoint[-5:] == '.hdf5':
weightsPath = 'Results/' + weightsSource + '/weightsCheckpoints/' + weightsCheckpoint
else:
weightsPath = 'Results/' + weightsSource + '/weightsCheckpoints/' + weightsCheckpoint + '.hdf5'
else:
weightsPath = 'Results/' + weightsSource + '/weights.hdf5'
print("Loading weights from {}".format(weightsPath))
model.load_weights(weightsPath)
else:
print("Starting with random weights")
if optimizer == "Adadelta":
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
elif optimizer == 'Adam':
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
metrics=['accuracy'])
else:
sys.exit("Error, invalid optimizer.")
print("Done creating model")
return model, nnStr
##############################
#
# TRAIN MODEL
#
##############################
def trainModel(resID, model, saveWeightsCheckpoints = True, saveTensorBoardLogs = True):
# gather run metadata first: these names are used when building the log dir below
kpm = model.count_params()//1000
dateTime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
# Load weights
if loadWeights:
initWeightsId = weightsSource
else:
initWeightsId = 'RND'
# prep callbacks arr
callbacksArr = []
if plotDuringTraining:
callbacksArr.append(plot_losses)
if saveTensorBoardLogs:
logDir = './logs/{}-{}pc-{}-{}KPM-{}-{}'.format(resID, nPi, initWeightsId, kpm, expDescr, dateTime)
callbacksArr.append(keras.callbacks.TensorBoard(log_dir=logDir))
# save weight checkpoint
if saveWeightsCheckpoints:
saveWeightsPath = "Results/" + resID + '/weightsCheckpoints/'
print("Saving weights to {}".format(saveWeightsPath))
createDir(saveWeightsPath)
filepath = saveWeightsPath + "weights-checkp-{epoch:03d}-{val_acc:.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacksArr.append(checkpoint)
fitHistory = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks = callbacksArr,
# .format(resID,nPi, initWeightsId, kpm, int(time() - 1500000000)))],
validation_data=(X_test, y_test))
print("Training done")
if saveTensorBoardLogs:
return fitHistory, logDir
else:
return fitHistory, None
##############################
#
# SAVE RESULTS
#
##############################
def genNextResultsDir(model, resID = None):
if resID is None:
#Get next resID
with open('Results/lastResId.txt','r') as file:
lastId = file.read()
resID = str(int(lastId) + 1).zfill(3)
#Iterate resID
with open('Results/lastResId.txt','w') as file:
file.write(resID)
# Generate save dir
saveDir = 'Results/' + str(resID) + '/'
print('Save dir: ' + saveDir)
print("Creating save dir")
createDir(saveDir, confirm = True)
# Save info directories
if loadWeights:
initWeightsId = weightsSource
else:
initWeightsId = 'RND'
kpm = str(model.count_params()//1000) + 'kpm'
createDir(saveDir + '_' + '_0.experimentDesc-------' + str(expDescr))
createDir(saveDir + '_' + '_1.numberOfPieces-------' + str(nPi))
createDir(saveDir + '_' + '_2.neuralNetStructure---' + str(nnStr))
createDir(saveDir + '_' + '_3.loadedWeightsFrom----' + str(initWeightsId))
createDir(saveDir + '_' + '_5.batchSize------------' + str(batch_size))
createDir(saveDir + '_' + '_6.optimizer------------' + str(optimizer))
createDir(saveDir + '_' + '_7.nrOfparameters-------' + str(kpm))
createDir(saveDir + '_' + '_9.multiGPU-------------' + str(multiGPU))
createDir(saveDir + 'fitTemp')
with open(saveDir + 'fitTemp/startTime.txt', 'w') as file:
file.write(str(time.time()))
print("Done generating results dir {}".format(saveDir))
return resID
def saveTrainResults(resID, model, logDir, score, copyFirstNLayers = None):
print("Saving results to dir {}".format(resID))
saveDir = 'Results/' + str(resID) + '/'
ep = len(model.history.history['acc'])
createDir(saveDir + '_' + '_4.epochs---------------' + str(ep) + '_of_' + str(epochs) )
createDir(saveDir + '_' + '_8.finalAccuracy--------' + str(round(score[1],3)))
if copyFirstNLayers is not None:
createDir(saveDir + '_' + '_11.copyFirstNLayers----' + str(copyFirstNLayers))
#save history
print("Saving history...")
hist = model.history.history
saveName = 'fitHistory'
save_obj(saveDir, saveName, hist)
#save weights
print("Saving weights...")
fileName = saveDir + 'weights.hdf5'
askAbortIfPathExists(fileName)
model.save_weights(fileName)
#save figures
print("Saving figures...")
acc = hist['acc']
loss = hist['loss']
val_acc = hist['val_acc']
val_loss = hist['val_loss']
x = [i for i in range(len(acc))]
# Create plots
f = plt.figure(figsize=(15,7))
ax = f.add_subplot(121)
ax2 = f.add_subplot(122)
# Plot Loss
ax.plot(x, loss, color='blue', label="Train", linewidth = 1)
ax.plot(x, val_loss, color='deepskyblue', label="Validation", linewidth = 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.legend()
ax.set_ylim(bottom=0)
ax.grid(True)
# Plot Accuracy
ax2.plot(x, acc, 'b-', label="Train", linewidth = 1)
ax2.plot(x, val_acc, color = 'deepskyblue', label="Validation", linewidth = 1)
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Accuracy')
ax2.legend()
ax2.set_ylim(top=1)
ax2.grid(True)
# Save plots
plt.savefig(saveDir + 'performance')
plt.show();
#save summary
print("Saving summary...")
from contextlib import redirect_stdout
fileName = saveDir + 'modelsummary.txt'
askAbortIfPathExists(fileName)
with open(fileName, 'w') as f:
with redirect_stdout(f):
model.summary()
# Save tensorboard logs
print("Saving tensorboard logs...")
saveDir = 'Results/' + str(resID) + '/' + logDir[7:]
copyDirectory(logDir, saveDir)
# Calc and save total time
saveDir = 'Results/' + str(resID) + '/'
with open(saveDir + 'fitTemp/startTime.txt', 'r') as file:
startTime = float(file.read())
endTime = time.time()
totalTime = endTime - startTime
if totalTime >3600*24:
totalTime = str(round(totalTime/(3600*24), 3)) + ' days'
elif totalTime >3600:
totalTime = str(round(totalTime/(3600), 3)) + ' hours'
elif totalTime >60:
totalTime = str(round(totalTime/(60), 3)) + ' minutes'
else:
totalTime = str(round(totalTime, 3)) + ' seconds'
createDir(saveDir + '_' + '_10.totalTime-----------' + str(totalTime))
print("All done saving stuff!")
##############################
#
# COMPARE RESULTS
#
##############################
def compareResults(res1, res2, label1 = '', label2 = '', metric1 = 'acc', metric2 = 'acc', saveFigName = '', makeEqual = False):
# Reshape input vector to fit on graph
def makeEqualLength(vec1, vec2):
l1 = len(vec1)
l2 = len(vec2)
if l1 == l2:
pass
elif l1 > l2:
l = l1 - l2
tail = np.ones((l), dtype = int) * vec2[-1]
vec2 = np.hstack((vec2, tail))
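# (source truncated here; presumably the mirror case pads vec1 when l2 > l1,
# and the function returns the equal-length pair vec1, vec2)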
import cv2
import numpy as np
import os
import time
import os.path as osp
import numpy as np
from PIL import Image
import random
import cv2
from torch.utils import data
import pickle
import random
Image.MAX_IMAGE_PIXELS = 2300000000
class SeedDataSet(data.Dataset):
def __init__(self, root='', list_path='', max_iters=None, transform=None):
self.root = root
self.list_path = list_path
self.img_ids = [i_id.strip() for i_id in open(list_path)]
if max_iters is not None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
self.transform = transform
# img_name, str(y0), str(y1), str(x0), str(x1)))
for name in self.img_ids:
img_name, msk_name, y0, y1, x0, x1 = name.split()
img_file = osp.join(self.root, img_name)
msk_file = osp.join(self.root, msk_name)
self.files.append({
"img": img_file,
"mask": msk_file,
"name": name,
"y0": int(y0),
"y1": int(y1),
"x0": int(x0),
"x1": int(x1)
})
print("length of train set: ", len(self.files))
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(datafiles["img"]).convert('RGB')
image = image.crop((datafiles["x0"], datafiles["y0"], datafiles["x1"], datafiles["y1"]))
mask = cv2.imread(datafiles["mask"],
cv2.IMREAD_GRAYSCALE)[datafiles["y0"]: datafiles["y1"], datafiles["x0"]: datafiles["x1"]]
size = image.size
name = datafiles["name"]
if self.transform is not None:
image = self.transform(image)
mask[mask < 128] = 0
mask[mask >= 128] = 1  # use >= so that every pixel is binarized
return image, mask.copy(), np.array(size)
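# Example usage sketch (hypothetical paths; list-file lines follow the
# "img_name msk_name y0 y1 x0 x1" format parsed above):
# train_set = SeedDataSet(root='data/', list_path='data/train_list.txt')
# loader = data.DataLoader(train_set, batch_size=8, shuffle=True)
# for image, mask, size in loader:
#     ...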
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['Channelizer']
import os
import platform
from copy import deepcopy
import numpy as np
import scipy.signal
from .filter import Filter
from auspex.parameter import Parameter, IntParameter, FloatParameter
from auspex.stream import DataStreamDescriptor, InputConnector, OutputConnector
from auspex.log import logger
try:
# load libchannelizer to access Intel IPP filtering functions
import numpy.ctypeslib as npct
from ctypes import c_int, c_size_t
np_float = npct.ndpointer(dtype=np.float32, flags='C_CONTIGUOUS')
libchannelizer_path = os.path.abspath(os.path.join( os.path.dirname(__file__), "libchannelizer"))
if "Windows" in platform.platform():
os.environ["PATH"] += ";" + libchannelizer_path
libipp = npct.load_library("libchannelizer", libchannelizer_path)
libipp.filter_records_fir.argtypes = [np_float, c_size_t, c_int, np_float, c_size_t, c_size_t, np_float]
libipp.filter_records_iir.argtypes = [np_float, c_size_t, np_float, c_size_t, c_size_t, np_float]
libipp.init()
load_fallback = False
except:
logger.warning("Could not load channelizer library; falling back to python methods.")
load_fallback = True
class Channelizer(Filter):
"""Digital demodulation and filtering to select a particular frequency multiplexed channel. If
an axis name is supplied to `follow_axis` then the filter will demodulate at the frequency
`axis_frequency_value - follow_freq_offset`, otherwise it will demodulate at `frequency`. Note that
the filter coefficients are still calculated with respect to the `frequency` parameter, so it should
be chosen accordingly when `follow_axis` is defined."""
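# The core operation in numpy terms (an illustrative sketch, not the exact
# staged pipeline implemented below): mix the record with a complex
# exponential at -frequency to shift the channel of interest to DC, then
# low-pass filter and decimate to select it:
#   baseband = record * np.exp(-2j*np.pi*frequency*time_pts)
#   channel  = scipy.signal.lfilter(b, a, baseband)[::decimation_factor]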
sink = InputConnector()
source = OutputConnector()
follow_axis = Parameter(default="") # Name of the axis to follow
follow_freq_offset = FloatParameter(default=0.0) # Offset
decimation_factor = IntParameter(value_range=(1,100), default=4, snap=1)
frequency = FloatParameter(value_range=(-10e9,10e9), increment=1.0e6, default=10e6)
bandwidth = FloatParameter(value_range=(0.00, 100e6), increment=0.1e6, default=5e6)
def __init__(self, frequency=None, bandwidth=None, decimation_factor=None,
follow_axis=None, follow_freq_offset=None, **kwargs):
super(Channelizer, self).__init__(**kwargs)
if frequency:
self.frequency.value = frequency
if bandwidth:
self.bandwidth.value = bandwidth
if decimation_factor:
self.decimation_factor.value = decimation_factor
if follow_axis:
self.follow_axis.value = follow_axis
if follow_freq_offset:
self.follow_freq_offset.value = follow_freq_offset
self.quince_parameters = [self.decimation_factor, self.frequency, self.bandwidth]
self._phase = 0.0
def final_init(self):
self.init_filters(self.frequency.value, self.bandwidth.value)
if self.follow_axis.value != "":
desc = self.sink.descriptor
axis_num = desc.axis_num(self.follow_axis.value)
self.pts_before_freq_update = desc.num_points_through_axis(axis_num + 1)
self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
self.demod_freqs = desc.axes[axis_num].points - self.follow_freq_offset.value
self.current_freq = 0
self.update_references(self.current_freq)
self.idx = 0
# For storing carryover if getting uneven buffers
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
def update_references(self, frequency):
# store decimated reference for mix down
# phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
ref = np.exp(2j*np.pi * -frequency * self.time_pts[::self.d1] + 1j*self._phase, dtype=np.complex64)
self.reference = ref
self.reference_r = np.real(ref)
self.reference_i = np.imag(ref)
def init_filters(self, frequency, bandwidth):
# convert bandwidth normalized to Nyquist interval
n_bandwidth = bandwidth * self.time_step * 2
n_frequency = abs(frequency) * self.time_step * 2
# arbitrarily decide on three stage filter pipeline
# 1. first stage decimating filter on real data
# 2. second stage decimating filter on mixed product to boost n_bandwidth
# 3. final channel selecting filter at n_bandwidth/2
# anecdotally don't decimate more than a factor of eight for stability
self.decim_factors = [1]*3
self.filters = [None]*3
# first stage decimating filter
# maximize first stage decimation:
# * minimize subsequent stages time taken
# * filter and decimate while signal is still real
# * first stage decimation cannot be too large or then 2omega signal from mixing will alias
self.d1 = 1
while (self.d1 < 8) and (2*n_frequency <= 0.8/self.d1) and (self.d1 < self.decimation_factor.value):
self.d1 *= 2
n_bandwidth *= 2
n_frequency *= 2
if self.d1 > 1:
# create an anti-aliasing filter
# pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
b,a = scipy.signal.cheby1(4, 3, 0.8/self.d1)
b = np.float32(b)
a = np.float32(a)
self.decim_factors[0] = self.d1
self.filters[0] = (b,a)
# store decimated reference for mix down
self.update_references(frequency)
# second stage filter to bring n_bandwidth/2 up
# decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
self.d2 = 1
while (self.d2 < 8) and ((self.d1*self.d2) < self.decimation_factor.value) and (n_bandwidth/2 <= 0.8):
self.d2 *= 2
n_bandwidth *= 2
n_frequency *= 2
if self.d2 > 1:
# create an anti-aliasing filter
# pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
b,a = scipy.signal.cheby1(4, 3, 0.8/self.d2)
b = np.float32(b)
a = np.float32(a)
self.decim_factors[1] = self.d2
self.filters[1] = (b,a)
# final channel selection filter
if n_bandwidth < 0.1:
raise ValueError("Insufficient decimation to achieve stable filter: {}.".format(n_bandwidth))
b,a = scipy.signal.cheby1(4, 3, n_bandwidth/2)
b = np.float32(b)
a = np.float32(a)
self.decim_factors[2] = self.decimation_factor.value // (self.d1*self.d2)
self.filters[2] = (b,a)
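# Worked example of the stage split (illustrative): with
# decimation_factor.value == 16 and a small n_frequency, d1 grows to 8; d2
# then stops at 2 because 8*2 is no longer < 16; the final stage takes the
# remainder, decim_factors[2] = 16 // (8*2) = 1, so the product of all three
# stages equals the requested decimation factor.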
def update_descriptors(self):
logger.debug('Updating Channelizer "%s" descriptors based on input descriptor: %s.', self.filter_name, self.sink.descriptor)
# extract record time sampling
self.time_pts = self.sink.descriptor.axes[-1].points
self.record_length = len(self.time_pts)
self.time_step = self.time_pts[1] - self.time_pts[0]
logger.debug("Channelizer time_step = {}".format(self.time_step))
# We will be decimating along a time axis, which is always
# going to be the last axis given the way we usually take data.
# TODO: perform this function along a named axis rather than a numbered axis
# in case something about this changes.
# update output descriptors
decimated_descriptor = DataStreamDescriptor()
decimated_descriptor.axes = self.sink.descriptor.axes[:]
decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[-1].points[self.decimation_factor.value-1::self.decimation_factor.value]
decimated_descriptor.axes[-1].original_points = decimated_descriptor.axes[-1].points
decimated_descriptor._exp_src = self.sink.descriptor._exp_src
decimated_descriptor.dtype = np.complex64
self.source.descriptor = decimated_descriptor
self.source.update_descriptors()
def process_data(self, data):
# Append any data carried from the last run
if self.carry.size > 0:
data = np.concatenate((self.carry, data))
# This is the largest number of records we can handle
num_records = data.size // self.record_length
# This is the carryover that we'll store until next round.
# If nothing is left then reset the carryover.
remaining_points = data.size % self.record_length
if remaining_points > 0:
if num_records > 0:
self.carry = data[-remaining_points:]
data = data[:-remaining_points]
else:
self.carry = data
else:
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
if num_records > 0:
# The records are processed in parallel after being reshaped here
reshaped_data = np.reshape(data, (num_records, self.record_length), order="C")
# Update demodulation frequency if necessary
if self.follow_axis.value != "":
freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset) // self.pts_before_freq_update]
if freq != self.current_freq:
self.update_references(freq)
self.current_freq = freq
self.idx += data.size
# first stage decimating filter
if self.filters[0] is None:
filtered = reshaped_data
else:
stacked_coeffs = np.concatenate(self.filters[0])
# filter
if np.iscomplexobj(reshaped_data):
# TODO: compile complex versions of the IPP functions
filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, np.ascontiguousarray(reshaped_data.real.astype(np.float32)), self.record_length, num_records, filtered_r)
libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, np.ascontiguousarray(reshaped_data.imag.astype(np.float32)), self.record_length, num_records, filtered_i)
filtered = filtered_r + 1j*filtered_i
# decimate
if self.decim_factors[0] > 1:
filtered = filtered[:, ::self.decim_factors[0]]
else:
filtered = np.empty_like(reshaped_data, dtype=np.float32)
libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, np.ascontiguousarray(reshaped_data.real.astype(np.float32)), self.record_length, num_records, filtered)
# decimate
if self.decim_factors[0] > 1:
filtered = filtered[:, ::self.decim_factors[0]]
# mix with reference
# keep real and imaginary separate for filtering below
if np.iscomplexobj(reshaped_data):
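# (source truncated here; presumably this branch mixes the complex
# filtered records with self.reference, while a real-data branch uses
# self.reference_r and self.reference_i separately, before the second-
# and third-stage filters run)
pass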
import csvreader
import numpy as np
import matplotlib.pyplot as plt
header, player_data = csvreader.csv_reader_with_headers('../data/Player_full_data.csv')
for player in player_data:
plt.style.use('ggplot')
values = [float(player[5]), float(player[10]), float(player[14]), float(player[18])]
feature = ['Body', 'Defense', 'Pass', 'Shot']
N = len(values)
angles = np.linspace(0, 2*np.pi, N, endpoint=False)
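# (source truncated here; a sketch of how such a radar chart is typically
# completed -- close the polygon by repeating the first point and draw on a
# polar axis; player[0] as the title is an assumption about the CSV layout:)
# values_c = np.concatenate((values, [values[0]]))
# angles_c = np.concatenate((angles, [angles[0]]))
# ax = plt.subplot(111, polar=True)
# ax.plot(angles_c, values_c, 'o-', linewidth=2)
# ax.fill(angles_c, values_c, alpha=0.25)
# ax.set_thetagrids(np.degrees(angles), feature)
# plt.title(player[0])
# plt.show()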
# Author: <NAME>
# Contact: <EMAIL>
# Version: 1.0.9
# Last modified: 09-03-2021 by <NAME>
"""Base class to be inherited for classification and regression tasks."""
import time
import warnings
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_random_state
from itea._manipulators.generator import uniform
from itea._manipulators.mutation import mutate_individual
from itea._manipulators.sanitizer import sanitize
from itea.inspection import ITExpr_explainer
import itea._manipulators.simplifier as simplifiers
class BaseITEA(BaseEstimator):
"""Base class to be inherited for classification and regression tasks.
This class implements argument checks and generic evolutionary methods
(population initialization, selection, mutation, and evolution), along with
three virtual methods to be implemented.
Ideally, this class should never be instantiated, only its derivations.
Its derivations will be scikit estimators and can be used in many scikit
methods such as grid-search or pipelines.
Every argument is a named argument. The list of arguments includes
everything that an ``ITExpr`` class needs to be instantiated.
All arguments have a default value. In this configuration, the
evolutionary process will search only for polynomials.
"""
def __init__(self, *,
gens = 100,
popsize = 100,
expolim = (-2, 2),
max_terms = 5,
simplify_method = None,
random_state = None,
verbose = None,
labels = [],
tfuncs = {'id': lambda x: x},
tfuncs_dx = None,
predictor_kw = None
):
"""Constructor method.
Parameters
----------
gens : int, default=100
number of generations of the evolutionary process. The
algorithm does not implement an early stop mechanism, so
it is guaranteed that the algorithm will perform the exact
number of generations.
popsize : int, default=100
population size, consistent through each generation.
expolim : tuple (int, int), default = (-2, 2)
tuple containing two integers, specifying the bounds
of exponents that can be explored through the evolution.
max_terms : int, default=5
the max number of IT terms allowed.
simplify_method : string or None, default=None
String with the name of the simplification method to be used
before fitting expressions through the evolutionary process.
When set to None, the simplification step is disabled.
Simplification can impact performance. To be simplified, the
expression must be previously fitted. After the simplification, if
the expression was changed, it should be fitted again to better
adjust the coefficients and intercept to the new IT expressions'
structure.
random_state : int, None or numpy.random_state, default=None
int or numpy random state. Use this argument
to have reproducible results across different
executions. When None, a random state instance
will be created and used and can be accessed
by ``itea.random_state``.
verbose : int, None or False, default=None
specify if the algorithm should perform the evolution
silently or if it should print information through the
process. When verbose is None, False, or 0, the algorithm
will not print any information. If verbose is an integer
``n``, then every ``n`` generations the algorithm will
print the status of the generation. If verbose is set
to -1, every generation information will be printed.
labels : list of strings, default=[]
(``ITExpr`` parameter) list containing the labels of the
data that will be used in the evolutionary process, and
will be used in ``ITExpr`` constructors.
tfuncs : dict, default={'id': lambda x: x}
(``ITExpr`` parameter) transformations functions to be
used when creating ``ITExpr`` 's during the
evolutionary process. Should always be a dict where the
keys are the names of the transformation functions and
the values are unary vectorized functions (for example,
numpy functions). For user-defined functions, see
numpy.vectorize for more information on how to vectorize
your transformation functions. Defaults to a dict with
only the identity function.
tfuncs_dx : dict, default=None
(ITExpr_explainer parameter) derivatives of the
given transformations functions, following the same scheme:
a dictionary where the key is the name of the function
(should have the derivatives of every function in
tfuncs) and the value is a vectorized function
representing its derivative. When set to None, the
itea package will use automatic differentiation
through jax to create the derivatives.
predictor_kw : dict or None, default = None
dictionary with parameters to pass as named arguments
to the constructor method in the ``BaseITExpr`` subclass.
If None is given, then an empty dict will be used.
"""
self.gens = gens
self.popsize = popsize
self.max_terms = max_terms
self.expolim = expolim
self.tfuncs = tfuncs
self.tfuncs_dx = tfuncs_dx
self.random_state = random_state
self.labels = labels
self.verbose = verbose
self.simplify_method = simplify_method
self.predictor_kw = predictor_kw
# This should always be none for the base class, so the default
# fitness function for each task (regression/classification) is
# correctly used.
self.fitness_f = None
def _check_args(self, X, y):
"""This method provides a simple verification of the arguments to be
used as a baseline.
The sub-classes of the BaseITEA should implement the check_args as well.
It is important to notice that the check must be made when fitting and
should raise errors to stop the program flow if any problem is found.
The scikit-learn recommendation is to never do checks on __init__.
Raises
------
ValueError
If one or more arguments would result in an invalid execution of
itea.
Notes
-----
As the scikit-learn documentation suggests, no check for valid arguments
is made in the constructor. Instead, the checks are performed when the
arguments are first used. All 'private' methods (beginning with
an underscore) are designed to work after ``_check_args`` is called,
since they rely on valid parameters, and all of them are intended
for internal usage. When modifying, calling them directly, or testing
the private methods, you should call ``_check_args`` manually.
"""
if self.expolim[1] < self.expolim[0]:
raise ValueError(
"Lower expolim bound is greater than upper bound.")
if self.max_terms < 1:
raise ValueError("max_terms should be greater or equal to 1.")
for bound in self.expolim:
if not np.issubdtype(type(bound), int):
raise ValueError(
f"the expolim bounds {bound} must be integers.")
if not np.issubdtype(type(self.max_terms), int):
raise ValueError(f"max_terms should be a int.")
if self.simplify_method is not None:
if not self.simplify_method in simplifiers.__all__:
raise ValueError(
f"simplify_method {self.simplify_method} does not exist. "
f"Available methods: {simplifiers.__all__}")
if 'id' not in list(self.tfuncs.keys()):
warnings.warn("It is necessary to provide an identity function "
"with name 'id' on the tfuncs dict, and I didn't found it. I will "
"insert ``'id' : lambda x: x`` on the dict.")
self.tfuncs['id'] = lambda x: x
self.labels = np.array([self.labels]).flatten()
if len(self.labels) != len(X[0]):
warnings.warn("The labels vector does not have the same length as "
"the number of variables in X (or was not provided). labels "
f"has length {len(self.labels)}, and X has {len(X[0])} variables. "
"labels will be generated as [x_0, x_1, ...].")
self.labels = [f'x_{i}' for i in range(len(X[0]))]
if self.predictor_kw is None:
self.predictor_kw = {}
def _create_population(
self, *, simplify_f, nvars, itexpr_class, X, y, random_state):
"""Method to create an initial population for the evolutionary process.
It will use an random expression generator that does not create
trivial expressions (where all exponents are zero).
Although, if the user has chosen an simplification method, exists the
possibility that the initial population will have fewer individuals
than the given popsize. The while loop tries to guarantee that we will
start with a clean population where all fitnessess are finite values.
"""
generator = uniform(
self.max_terms, self.expolim, self.tfuncs, nvars, random_state)
# The loop below ensures that the first population is always
# composed of valid expressions with finite fitness.
pop = []
while(len(pop) < self.popsize):
expr = sanitize(next(generator))
itexpr = itexpr_class(
expr=sanitize(expr), tfuncs=self.tfuncs, labels=self.labels,
fitness_f = self.fitness_f, **self.predictor_kw
)
with np.errstate(all='ignore'):
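# (source truncated here; presumably the candidate is fitted inside this
# errstate guard and appended to pop only when its fitness evaluates to a
# finite value, matching the comment above the loop)
pass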
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used to validate random-distribution layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
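# i.e. the k-th central moment E[(X - E[X])**k]; k=2 gives the variance.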
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
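        # Assumed semantics of the asymmetry mode: with 'same' padding and
        # stride 2, TOP_LEFT_HEAVY places any odd leftover pad row/column on
        # the top/left edge.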
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
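        # The random values are placeholders: validate_shapes_only=True below
        # checks only that the output shape is (20, 8, 8).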
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (1, 10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
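        # An upper bound of -1 leaves that dimension unbounded, which is why
        # the (60, 5) shape below is admissible.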
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration(self, rank=4):
default_shape = tuple(np.random.randint(1, 15, size=rank))
input_features = [('data', datatypes.Array(*default_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features=input_features,
output_features=[('output', None)],
disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [tuple(np.random.randint(1, 15, size=rank)),
tuple(np.random.randint(1, 15, size=rank))]
flexible_shape_utils.add_multiarray_ndshape_enumeration(
spec, feature_name='data', enumerated_shapes=shapes)
shapes.append(default_shape)
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration_rank3(self):
self.test_shape_flexibility_enumeration(rank=3)
def test_shape_flexibility_enumeration_rank2(self):
self.test_shape_flexibility_enumeration(rank=2)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_dynamic_weight_conv(self):
input_dim = (1, 3, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (4, 3, 3, 3)
output_dim = (1, 4, 14, 14)
        output_channels, kernel_channels, height, width = weight_dim
input_features = [
('input', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='two_input_conv_layer',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
input_name=['input', 'weight'],
output_name='output')
# Assigning everything to ones should cover the execution path
# and engine failures, but is not a complete check on numerics.
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
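        # Each output element sums a 3x3 window across 3 input channels, so
        # an all-ones input and weight give 3 * 3 * 3 = 27.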
expected = np.ones(output_dim) * 27
feed_dict = {'input': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)
@pytest.mark.xfail
def test_dynamic_weight_deconv(self):
# Expect to fail in Core ML 3
input_dim = (1, 1, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (1, 1, 3, 3)
output_dim = (1, 1, 18, 18)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('data', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='deconv',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
is_deconv=True,
input_name=['data', 'weight'],
output_name='output')
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'data': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected)
def test_batched_mat_mul_cpu(self, cpu_only=True):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
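        # Note: out_shapes reflects Core ML's rank >= 2 outputs, so the pure
        # 1-D NumPy cases appear here as (1, 1), (4, 1) and (1, 3) rather
        # than NumPy's scalar/vector results.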
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input_ = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input_, expected, useCPUOnly=cpu_only,
output_name_shape_dict=shape_dict)
self.assertEqual(len(outShape), builder._get_rank('output'))
def test_batched_mat_mul_gpu(self):
self.test_batched_mat_mul_cpu(cpu_only=False)
def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_batched_mat_mul_with_transposes_gpu(self):
self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False)
def test_batched_mat_mul_single_input_cpu(self,
model_precision=_MLMODEL_FULL_PRECISION,
cpu_only=True):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=cpu_only)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION,
cpu_only=True)
def test_batched_mat_mul_single_input_gpu(self):
self.test_batched_mat_mul_single_input_cpu(model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
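            # Reference computation: each integer id selects one embedding
            # vector, i.e. a column of W (a row of W transposed).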
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nan_bug_cpu(self):
input_shape = [2,2]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
for axis in [0,1]:
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.array([[0.5, 0.5],[1e8, 1e8]])
input = {'data': x}
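            # Reference uses the numerically stable softmax: without
            # subtracting the max, exp(1e8) overflows and the output becomes
            # NaN -- the bug this test guards against.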
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_softmax_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_softmax_nd_gpu(self):
self.test_softmax_nd_cpu(cpu_only=False)
def test_concat_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_concat_nd_gpu(self):
self.test_concat_nd_cpu(cpu_only=False)
def test_fill_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_fill_like_gpu(self):
self.test_fill_like_cpu(cpu_only=False)
def test_fill_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(shape), builder._get_rank('output'))
def test_fill_static_gpu(self):
self.test_fill_static_cpu(cpu_only=False)
def test_fill_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_fill_dynamic_gpu(self):
self.test_fill_dynamic_cpu(cpu_only=False)
def test_broadcast_to_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
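            # Dimensions beyond the input rank, and size-1 (broadcastable)
            # input dimensions, get a fresh random size; the rest are copied
            # from the input shape.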
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_broadcast_to_like_gpu(self):
self.test_broadcast_to_like_cpu(cpu_only=False)
def test_broadcast_to_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_broadcast_to_static_gpu(self):
self.test_broadcast_to_static_cpu(cpu_only=False)
def test_broadcast_to_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_broadcast_to_dynamic_gpu(self):
self.test_broadcast_to_dynamic_cpu(cpu_only=False)
    # Test that the output rank is set to unknown (-1) when one of the
    # input ranks is unknown (max-rank case)
def test_unknown_rank(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('x', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['x', 'shape'],
output_name='y')
condition = np.random.randint(0, 2, input_shape).astype(np.float32)
builder.add_load_constant_nd(name='load_constant_condition',
output_name='condition',
constant_value=condition,
shape=input_shape)
builder.add_where_broadcastable(name='where',
input_names=['condition', 'x', 'y'],
output_name='output')
self.assertEqual(builder._get_rank('output'), -1)
def test_trigonometry_cpu(self, cpu_only=True):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
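                    # arccosh is only defined for x >= 1, so x is redrawn
                    # from values inside its domain.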
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=cpu_only)
def test_trigonometry_gpu(self):
self.test_trigonometry_cpu(cpu_only=False)
def test_exp2_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_exp2_gpu(self):
self.test_exp2_cpu(cpu_only=False)
def test_elementwise_binary_cpu(self, cpu_only=True):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_binary_gpu(self):
self.test_elementwise_binary_cpu(cpu_only=False)
def test_elementwise_boolean_unary_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_boolean_unary_gpu(self):
self.test_elementwise_boolean_unary_cpu(cpu_only=False)
def test_logical_not_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_logical_not_gpu(self):
self.test_logical_not_cpu(cpu_only=False)
def test_stack_cpu(self, cpu_only=True):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(input_rank + 1, builder._get_rank('output'))
def test_stack_gpu(self):
self.test_stack_cpu(cpu_only=False)
def test_ceil_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_ceil_gpu(self):
self.test_ceil_cpu(cpu_only=False)
def test_floor_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_floor_gpu(self):
self.test_floor_cpu(cpu_only=False)
def test_round_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_round_gpu(self):
self.test_round_cpu(cpu_only=False)
def test_sign_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
x = np.random.choice([-np.random.rand(1), 0.0, np.random.rand(1)],
tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_sign_gpu(self):
self.test_sign_cpu(cpu_only=False)
def test_clip_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_clip_gpu(self):
self.test_clip_cpu(cpu_only=False)
def test_split_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
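                # np.array_split places one extra element in each of the
                # first 'remainder' outputs, which the shapes built below
                # mirror.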
value = np.random.choice(range(2, 5))
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][
axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
) # Explicitly trying to compare against both versions of numpy split
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_gpu(self):
self.test_split_nd_cpu(cpu_only=False)
def test_split_nd_with_split_sizes_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_with_split_sizes_gpu(self):
self.test_split_nd_with_split_sizes_cpu(cpu_only=False)
def test_slice_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj), strides.append(stride), begin_masks.append(
begin_mask)
end_masks.append(end_mask), begin_ids.append(
begin_id), end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_static_gpu(self):
self.test_slice_static_cpu(cpu_only=False)
def test_slice_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj), strides.append(stride), begin_masks.append(
begin_mask)
end_masks.append(end_mask), begin_ids.append(
begin_id), end_ids.append(end_id)
# test different number of inputs, from 2 inputs up to 6 inputs
# when num_inputs == 2, begin_ids are inputs, rest are read from parameters
# when num_inputs == 6, all read from inputs, none are read from parameters
for num_inputs in [2, 3, 4, 5, 6]:
                x = np.random.rand(*input_shape)
# coding: utf-8
""" Classes for accessing simulation data for Sgr-like streams with
different mass progenitors.
"""
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
from random import sample
# Third-party
import numpy as np
import numexpr
import astropy.io.ascii as ascii
from astropy.table import Column
import astropy.units as u
from astropy.constants import G
from gary.io import SCFReader
from gary.units import usys as _usys
# Project
from .. import usys
from ..dynamics import Particle, Orbit
from ..util import streamspath
from ..coordinates.frame import galactocentric
from ..potential.lm10 import LM10Potential
from ..inference.util import guess_tail_bit, particles_x1x2x3
__all__ = ["SgrSimulation"]
class SgrSimulation(object):
def __init__(self, path, snapfile):
""" """
# potential used for the simulation
        self.potential = LM10Potential()
        # resolve relative paths against the streams simulation data directory
if not os.path.exists(path):
_path = os.path.join(streamspath, "data", "simulation", path)
if os.path.exists(_path):
path = _path
else:
raise IOError("Path '{}' doesn't exist".format(path))
self.path = path
self.reader = SCFReader(self.path)
self.particle_table = self.reader.read_snap(snapfile, units=usys)
self.units = _usys
# get mass column from table
m = np.array(self.particle_table['m'])*self.particle_table['m'].unit
        self.mass = np.sum(m)
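# A minimal usage sketch (the path and snapshot name below are hypothetical,
# for illustration only):
#
#   sim = SgrSimulation("sgr_nfw/M2.5e+08", "SNAP113")
#   print(sim.mass, sim.units)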
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestSparseToDenseMask(TestCase):
def test_sparse_to_dense_mask_float(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 6])
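        # 'mask' lists the sparse ids that map to dense columns 0..2, in
        # order; ids not in the mask are dropped, and mask ids absent from a
        # row take 'default'.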
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 1, 2, 999999999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 3, 4, 5, 6, 7], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.FeedBlob('lengths', np.array([3, 4], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([[-1, 1, 3], [6, 7, -1]], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_invalid_inputs(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2])
workspace.FeedBlob(
'indices',
np.array([2000000000000, 999999999, 2, 3, 4, 5], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 3, 4, 5, 6], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.FeedBlob('lengths', np.array([6], dtype=np.int32))
try:
workspace.RunOperatorOnce(op)
except RuntimeError:
self.fail("Exception raised with only one negative index")
workspace.FeedBlob(
'indices',
np.array([2000000000000, 999999999, -2, -3, -4, -5], dtype=np.int32))
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
def test_sparse_to_dense_mask_subtensor(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 888, 6])
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 999999999, 2], dtype=np.int64))
workspace.FeedBlob(
'values',
np.array([[[1, -1]], [[2, -2]], [[3, -3]], [[4, -4]], [[5, -5]]],
dtype=np.float))
workspace.FeedBlob('default', np.array([[-1, 0]], dtype=np.float))
workspace.FeedBlob('lengths', np.array([2, 3], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([
[[[-1, 0]], [[1, -1]], [[-1, 0]], [[-1, 0]]],
[[[4, -4]], [[5, -5]], [[-1, 0]], [[3, -3]]]], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_string(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 6])
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 1, 2, 999999999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
            np.array(['1', '2', '3', '4', '5', '6', '7'], dtype='S'))
"""
This a Random Multitone Signal Generator module. |br|
It is able to generate *N* multitone random signals according to settings
given by a user. |br|
*Examples*:
Please go to the *examples/signals* directory for examples on how to use
the generator. |br|
*Settings*:
Parameters of the generator are described below.
Take a look at the '__parametersDefine' function for more info on the
parameters.
Parameters of the generator are attributes of the class which must/can
be set before the generator is run.
Required parameters:
- a. **tS** (*float*): time of a signals
- b. **fR** (*float*): signals' representation sampling frequency
- c. **fRes** (*float*): tones frequency resolution
Optional parameters:
- d. **fMax** (*float*): maximum frequency present in signals [default = 0.4 * fR]
- e. **fMin** (*float*): minimum allowed frequency present in the spectrum [default = fRes]
- f. **nSigs** (*int*): the number of signals to be generated [default = 1]
- g. **iSNR** (*float*): level of noise in signals [dB] (SNR) [default = +inf]
- h. **iP** (*float*): requested power of signals [default = do not regulate the power]
- i. **vFrqs** (*Numpy array 1D*): vector with requested frequencies of tones [default = empty]
- j. **vAmps** (*Numpy array 1D*): vector with requested amplitudes of tones [default = empty]
- k. **vPhs** (*Numpy array 1D*): vector with requested phases of tones [default = empty]
- l. **nTones** (*int*): the number of additional tones [default = 0]
- m. **iMinAmp** (*float*): min amplitude of a tone present in a signal [default = 0.1]
- n. **iGraAmp** (*float*): gradation of a random amplitude of a tone [default = 0.1]
- o. **iMaxAmp** (*float*): max amplitude of a tone present in a signal [default = 1.0]
- p. **iMinPhs** (*float*): min allowed phase of a tone present in a signal [default = -179 deg]
- q. **iGraPhs** (*float*): gradation of a random phase of a tone [default = 1 deg]
- r. **iMaxPhs** (*float*): max allowed phase of a tone present in a signal [default = +180 deg]
- s. **bMute** (*int*): mute the console output from the generator [default = 0]
*Output*:
Description of the generator output is below.
This is the list of attributes of the generator class which are available
after calling the 'run' method:
- a. **mSig** (*Numpy array 2D*): Matrix with output signals
- b. **mSigNN** (*Numpy array 2D*): Matrix with nonnoisy output signals
- c. **nSmp** (*int*): The number of samples in the signals
- d. **vTSig** (*Numpy array 1D*): The time vector for the generated signals (name alias vT is available)
- e. **vP** (*Numpy array 1D*): Power of the signals
- f. **vPNN** (*Numpy array 1D*): Power of the non noisy signals
- g. **vPCoef** (*Numpy array 1D*): Power adjustment coefficients
- h. **mFrqs** (*Numpy array 2D*): Frequencies of tones in the signals
- i. **mAmps** (*Numpy array 2D*): Amplitudes of tones in the signals
- j. **mPhs** (*Numpy array 2D*): Phases of tones in the signals
- k. **mAmPh** (*Numpy array 2D*): Complex matrix with amplitudes/phases of tones
- l. **fFFTR** (*float*): Signal FFT frequency resolution
*Author*:
<NAME>, Aalborg University, Denmark. <<EMAIL>>
*Version*:
0.1 | 15-MAY-2014 : * Initial version. |br|
0.2 | 16-MAY-2014 : * Docstrings added. |br|
0.3 | 19-MAY-2014 : * The main func. divided into smaller functions. |br|
0.4 | 20-MAY-2014 : * Errors are served by 'raise'. |br|
0.5 | 20-MAY-2014 : * Docstrings are added to the internal functions. |br|
0.5r1 | 20-MAY-2014 : * Order of the internal functions was changed. |br|
1.0 | 20-MAY-2014 : * Version 1.0 released. |br|
1.0r1 | 21-MAY-2014 : * Error in progress printing is fixed. |br|
1.0r2 | 21-MAY-2014 : * Error in default random amplitude is fixed. |br|
1.0r3 | 21-MAY-2014 : * Error in progress printing (if muted) is fixed. |br|
1.0r4 | 27-MAY-2014 : * Error in the number of given frqs is fixed. |br|
1.0r5 | 27-MAY-2014 : * Error in the vector with signal time is fixed. |br|
2.0 | 21-JUL-2015 : * Objectified version (2.0) |br|
2.0r1 | 18-AUG-2015 : * Adjusted to RxCSobject v1.0 |br|
2.1 | 02-SEP-2015 : * Max frequency in the spectrum is an optional parameter;
printing improvements. |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
class randMult(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Signal generator' # Name of group of RxCS modules
self.strModuleName = 'Random multitone' # Module name
self.__parametersDefine() # Define the parameters
# Define parameters
def __parametersDefine(self):
# Time of the signal [s]
self.paramAddMan('tS', 'Time of the signal', unit='s')
self.paramType('tS', (int, float))
self.paramH('tS', 0)
self.paramL('tS', np.inf)
# The signal representation sampling frequency [Hz]
self.paramAddMan('fR', 'The signal representation sampling frequency', unit='Hz')
self.paramType('fR', (int, float))
self.paramH('fR', 0)
self.paramL('fR', np.inf)
# The highest possible frequency in the signal [Hz]
self.paramAddOpt('fMax', 'The highest possible frequency in the signal', unit='Hz', default='$$fR', mul=0.4)
self.paramType('fMax', (int, float))
self.paramH('fMax', 0)
self.paramL('fMax', 'fR', mul=0.5) # Nyquist principle
# Signal spectrum resolution [Hz]
self.paramAddMan('fRes', 'Signal spectrum resolution', unit='Hz')
self.paramType('fRes', (int, float))
self.paramH('fRes', 0)
self.paramL('fRes', np.inf)
# The minimum frequency of additional tones
self.paramAddOpt('fMin', 'Minimum frequency of additional tones', default='$$fRes', unit='Hz')
self.paramType('fMin', (int, float))
self.paramH('fMin', 0)
self.paramLE('fMin', 'fMax')
# The number of signals to be generated
self.paramAddOpt('nSigs', 'The number of signals to be generated', default=1)
self.paramType('nSigs', (int))
self.paramH('nSigs', 0) # The number of signals must be higher than zero
self.paramL('nSigs', np.inf) # ...and lower than infinity
# Signal noise [dB]
self.paramAddOpt('iSNR', 'Signal to noise ratio', default=np.inf, unit='dB')
self.paramType('iSNR', (int, float))
# Power of signals [W]
self.paramAddOpt('iP', 'Power of signals', default=np.nan, unit='W')
self.paramType('iP', (int, float))
self.paramH('iP', 0)
self.paramL('iP', np.inf)
# Vector with given frequencies of signal cosine tones
self.paramAddOpt('vFrqs', 'Vector with given frequencies of signal cosine tones', default=np.zeros(0), noprint=1)
self.paramType('vFrqs', np.ndarray)
strError = 'Frequencies in \'vFrqs\' vector must not be higher than the highest possible in the signal spectrum!'
self.paramLE('vFrqs', 'fMax', errnote=strError)
strError = 'Frequencies in the \'vFrqs\' must be higher than 0!'
self.paramH('vFrqs', 0, errnote=strError)
strError = 'Frequencies in \'vFrqs\' vector must not be lower than the lowest possible in the signal spectrum!'
self.paramHE('vFrqs', 'fMin', errnote=strError)
strError = 'Size of the vector with given frequencies \'vFrqs\' must be equal to size of the vectors \'vAmps\' and \'vPhs\''
self.paramSizEq('vFrqs', 'vAmps', errnote=strError)
self.paramSizEq('vFrqs', 'vPhs', errnote=strError)
self.paramNDimEq('vFrqs', 1)
strError = 'There are frequencies repeated in the \'vFrqs\' vector!'
self.paramUnique('vFrqs', errnote=strError)
self.NaNAllowedEl('vFrqs')
# Vector with given amplitudes of signal cosine tones
self.paramAddOpt('vAmps', 'Vector with given amplitudes of signal cosine tones', default=np.zeros(0), noprint=1)
self.paramType('vAmps', np.ndarray)
        self.paramH('vAmps', 0, errnote='Amplitudes of tones must be higher than 0')
self.paramNDimEq('vAmps', 1)
self.NaNAllowedEl('vAmps')
# Vector with given phases of signal cosine tones content
self.paramAddOpt('vPhs', 'Vector with given phases of signal cosine tones', default=np.zeros(0), noprint=1)
self.paramType('vPhs', np.ndarray)
self.paramH('vPhs', -180, errnote='Phases of tones must be higher than -180 [deg]')
self.paramLE('vPhs', 180, errnote='Phases of tones must be lower or equal to 180 [deg]')
self.paramNDimEq('vPhs', 1)
self.NaNAllowedEl('vPhs')
# The number of additional tones
self.paramAddOpt('nTones', 'The number of additional tones', default=0)
self.paramType('nTones', (int))
self.paramHE('nTones', 0)
# The boundaries for amplitudes: amplitude min value
self.paramAddOpt('iMinAmp', 'Minimum value of random amplitudes', default=0.1, unitprefix=' ')
self.paramType('iMinAmp', (int, float))
self.paramH('iMinAmp', 0)
self.paramLE('iMinAmp','iMaxAmp')
# The boundaries for amplitudes: amplitude gradation
self.paramAddOpt('iGraAmp', 'Gradation of value of random amplitudes', default=0.1, unitprefix=' ')
self.paramType('iGraAmp', (int, float))
self.paramH('iGraAmp', 0)
# The boundaries for amplitudes: amplitude max value
self.paramAddOpt('iMaxAmp', 'Maximum value of random amplitudes', default=1.0, unitprefix=' ')
self.paramType('iMaxAmp', (int, float))
self.paramH('iMaxAmp', 0)
        # The boundaries for phases: phase min value
self.paramAddOpt('iMinPhs', 'Minimum value of random phase', default=-179, unitprefix=' ')
self.paramType('iMinPhs', (int, float))
self.paramH('iMinPhs',-180)
self.paramLE('iMinPhs',180)
self.paramLE('iMinPhs','iMaxPhs')
        # The boundaries for phases: phase gradation
self.paramAddOpt('iGraPhs', 'Gradation of value of random phase', default=1, unitprefix=' ')
self.paramType('iGraPhs', (int, float))
self.paramH('iGraPhs', 0)
        # The boundaries for phases: phase max value
self.paramAddOpt('iMaxPhs', 'Maximum value of random phase', default=180, unitprefix=' ')
self.paramType('iMaxPhs', (int, float))
self.paramH('iMaxPhs',-180)
self.paramLE('iMaxPhs',180)
# 'Mute the output' flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
# Run
def run(self):
        self.parametersCheck()  # Check if all the needed parameters are in place and are correct
self.parametersPrint() # Print the values of parameters
self.engineStartsInfo() # Info that the engine starts
self.__engine() # Run the engine
self.engineStopsInfo() # Info that the engine ends
return self.__dict__ # Return dictionary with the parameters
# Engine of the function
def __engine(self):
self._checkConf()
# - - - - - - - - - - - - - - - - - - -
# Signal generation starts here:
self.mFrqsInx = self._drawFreq(self.vFrqs, self.nTones, self.fMin, self.fMax, self.nSigs, self.fRes) # Draw frequencies of the signals
self.mAmps = self._drawAmps(self.vAmps, self.nTones, self.nSigs, self.iMinAmp, self.iGraAmp, self.iMaxAmp) # Draw amplitudes of the signals
self.mPhs = self._drawPhases(self.vPhs, self.nTones, self.nSigs, self.iMinPhs, self.iGraPhs, self.iMaxPhs) # Draw phases of the signals
# Generate the signals by IFFT
(mSig, self.mAmPh, self.mFrqs, self.fFFTR) = \
self._genSigs(self.mFrqsInx, self.mAmps, self.mPhs, self.nSigs, self.tS, self.fR, self.fRes)
# - - - - - - - - - - - - - - - - - - -
# Adjust the signal power
(mSig, vP, self.vPCoef, self.mAmps, self.mAmPh) = self._adjPower(mSig, self.iP, self.mAmps, self.mAmPh)
# Add the AWGN noise to the signals
(self.mSigNN, self.vPNN, self.mSig, self.vP) = self._addNoise(mSig, vP, self.iSNR)
# Generate the time vector for the signal
self.vTSig = np.arange(self.nSmp) / self.fR
self.vT = self.vTSig # Name alias for vTSig is vT
return
def _checkConf(self):
"""
This function checks the configuration of the generator.
"""
#----------------------------------------------------------------------
# If the minimum frequency was not given, it is equal to the frequency resolution
if np.isnan(self.fMin):
self.fMin = self.fRes
#----------------------------------------------------------------------
# Check the lowest possible frequency in the signal vs spectrum
# resolution
strErr = 'The lowest possible frequency in the signal is not a multiple '
strErr = strErr + 'of the signal spectrum resolution'
        if abs(round(self.fMin/self.fRes) - self.fMin/self.fRes) > 1e-15:
raise ValueError(strErr)
#----------------------------------------------------------------------
# Check the highest possible frequency in the signal vs spectrum
# resolution
strErr = 'The highest possible frequency in the signal is not a multiple '
strErr = strErr + 'of the signal spectrum resolution'
        if abs(round(self.fMax/self.fRes) - self.fMax/self.fRes) > 1e-15:
raise ValueError(strErr)
#----------------------------------------------------------------------
# Check if there is a space for all the frequencies requested in
# the signal, both given in the vFrqs vector and requested to be
# chosen randomly
# Compute the total number of tones in the max possible spectrum
nSpectTones = int(self.fMax/self.fRes) - int(self.fMin/self.fRes) + 1
nFG = self.vFrqs.size # The number of frequencies given in the vector with frequencies
nSigTones = nFG + self.nTones # The total number of tones which will be present in the signal
strErr = 'The signal spectrum consists of %d tones. ' % (nSpectTones)
strErr = strErr + 'I can not put there %d [vFrqs] + %d [nTones] tones' \
% (nFG, self.nTones)
if nSpectTones < nSigTones:
raise ValueError(strErr)
#----------------------------------------------------------------------
        # Check if there is a frequency leakage
self.nSmp = int(round(self.tS*self.fR)) # Calculate the number of samples in the signals
fFFTR = self.fR/self.nSmp # Calculate the FFT frequency resolution
if abs(round(self.fRes/fFFTR) - self.fRes/fFFTR) > 0:
            strErr = ('Frequency leakage! Signal spectrum resolution cannot be ')
            strErr = strErr + ('represented with the current signal parameters!')
raise ValueError(strErr)
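        # Example: tS = 1e-3 s and fR = 1e6 Hz give nSmp = 1000 and
        # fFFTR = 1 kHz; fRes = 1 kHz is then representable, while
        # fRes = 1.5 kHz would raise the error above.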
#----------------------------------------------------------------------
        # Check the vector with given frequencies, if it is not empty
if nFG > 0:
# Create the vector with given and specified frequencies (not np.nan)
vFrqs_ = self.vFrqs[np.isnan(self.vFrqs) == 0]
# 1. check resolution of given frequencies:
if np.abs(np.sum(np.round(vFrqs_/self.fRes) - (vFrqs_/self.fRes))) > 1e-15:
strErr = ('A frequency given in the vFrqs vector is ')
strErr = strErr + ('incoherent with the resolution of signal ')
strErr = strErr + ('spectrum!\n')
raise ValueError(strErr)
# Do correction of possible representation errors in the vector with frequencies
self.vFrqs = np.round(self.vFrqs/self.fRes)*self.fRes
#----------------------------------------------------------------------
return
# =================================================================
# Draw frequencies of the signals
# =================================================================
def _drawFreq(self, vFrqs, nTones, fMin, fMax, nSigs, fRes):
"""
        This function draws frequencies of tones for all the signals
        according to the rules specified by the user.
Args:
vFrqs (vector): vector with specified frequencies
nTones (int): the number of additional tones
fMin (int): the min allowed frequency in the signal spectrum
fMax (int): the max allowed frequency in the signal spectrum
nSigs (int): the number of signals to be generated
fRes (int): signal spectrum resolution
(distance between the tones in the spectrum)
Returns:
mFrqsInx (matrix): matrix with frequencies of tones for all
the signals (one row - one signal)
The frequencies are represented as indices of
frequencies from the allowed signal spectrum.
"""
#----------------------------------------------------------------------
        # Recalculate the vector with frequencies from frequency values
        # to indices of frequencies in the spectrum
vFrqsInx = (vFrqs / fRes)
# Create a vector with GIVEN (not nan) indices of frequencies in the
# vFrqs vector
vFrqsInx_ = (vFrqsInx[np.isnan(vFrqsInx) == 0]).astype(int)
#----------------------------------------------------------------------
        # Create a vector with indices of all the available tones in the spectrum
vSpecInx = np.arange(1, int(fMax/fRes) + 1)
# Boolean vector which indicates if the frequency is free
vFreqIsFree = np.ones(int(fMax/fRes)).astype(bool)
# Mark all the frequencies below min frequency as unavailable
for inxF in np.arange(int(fMin/fRes)-1):
vFreqIsFree[inxF] = 0
# Mark the frequencies taken by vFreq vector as unavailable
vFreqIsFree[vFrqsInx_ - 1] = 0
# Create the vector with indices of available frequencies
vAvailFreqsInx = vSpecInx[vFreqIsFree]
#----------------------------------------------------------------------
# Construct a vector with indices of frequencies for all the needed signals
# Add unknown frequencies of the additional tones to the vFrqsInx vector
vFrqsInx = np.concatenate((vFrqsInx, np.nan*np.zeros(nTones)))
# Calculate the number of missing frequencies in the vector with
# frequencies
iMissF = len(vFrqsInx) - len(vFrqsInx_)
# Construct a matrix with indices of frequencies for all the needed signals
mFrqsInx = np.tile(vFrqsInx, (nSigs, 1))
#----------------------------------------------------------------------
# Draw the frequencies
for inxSig in np.arange(nSigs):
# Permute all the indices of frequencies in the spectrum
vPermutedFreqsInx = ((np.random.permutation(vAvailFreqsInx)).T)
            # From the permuted indices of frequencies take as many
            # as are missing
vTakenFreqsInx = vPermutedFreqsInx[np.arange(iMissF)]
# Put the taken indices of frequencies to the matrix with frequency
# indices for all the signals
mFrqsInx[inxSig, np.isnan(mFrqsInx[inxSig, :])] = vTakenFreqsInx
return mFrqsInx
# =================================================================
# Draw amplitudes of the signals
# =================================================================
def _drawAmps(self, vAmps, nTones, nSigs, iMinAmp, iGraAmp, iMaxAmp):
"""
        This function draws amplitudes of tones for all the signals
        according to the rules specified by the user.
Args:
vAmps (vector): vector with specified amplitudes of tones in signals
nTones (int): the number of additional tones
nSigs (int): the number of signals to be generated
iMinAmp (int): min amplitude of a random tone present in a signal
            iGraAmp (int): gradation of the amplitude of a random tone
iMaxAmp (int): max amplitude of a random tone present in a signal
Returns:
mAmps (matrix): matrix with amplitudes of tones for all
the signals (one row - one signal)
"""
# Add unknown amplitudes of the additional tones to the vAmps vector
vAmps = np.concatenate((vAmps, np.nan*np.zeros(nTones)))
# Compute the number of missing amplitudes for every signal
iMissA = (vAmps[np.isnan(vAmps)]).size
#----------------------------------------------------------------------
# Compute the number of possible amplitude values
nAmpVal = np.floor((iMaxAmp - iMinAmp) / iGraAmp) + 1
#----------------------------------------------------------------------
# Draw the missing amplitudes for all the signals
vDrawAmps = \
iMinAmp + iGraAmp*(np.random.randint(0, nAmpVal, (nSigs*iMissA)))
# Construct a matrix with amplitudes of tones for all the needed signals
mAmps = np.tile(vAmps, (nSigs, 1))
        # Put the drawn amplitudes into the matrix with amplitudes of tones for
        # all the needed signals
mAmps[np.isnan(mAmps)] = vDrawAmps
return mAmps
# =================================================================
# Draw phases of the signals
# =================================================================
def _drawPhases(self, vPhs, nTones, nSigs, iMinPhs, iGraPhs, iMaxPhs):
"""
        This function draws phases of tones for all the signals
        according to the rules specified by the user.
Args:
vPhs (vector): vector with specified phases of tones in signals
nTones (int): the number of additional tones
nSigs (int): the number of signals to be generated
iMinPhs (int): min phase of a random tone present in a signal
iGraPhs (int): gradation of a phase of a random tone
iMaxPhs (int): max phase of a random tone present in a signal
Returns:
mPhs (matrix): matrix with phases of tones for all
the signals (one row - one signal)
"""
        # Add unknown phases of the additional tones to the vPhs vector
vPhs = np.concatenate((vPhs, np.nan*np.zeros(nTones)))
# Compute the number of missing phases for every signal
iMissP = (vPhs[np.isnan(vPhs)]).size
#----------------------------------------------------------------------
# Compute the number of possible phase values
nPhsVal = np.floor((iMaxPhs - iMinPhs)/iGraPhs) + 1
#----------------------------------------------------------------------
# Draw the missing phases for all the signals
vDrawPhs = \
iMinPhs + iGraPhs*(np.random.randint(0, nPhsVal, (nSigs*iMissP)))
# Construct a matrix with phases of tones for all the needed signals
mPhs = np.tile(vPhs, (nSigs, 1))
        # Put the drawn phases into the matrix with phases of tones for
        # all the needed signals
mPhs[np.isnan(mPhs)] = vDrawPhs
return mPhs
# =================================================================
# Generate the signals by IFFT
# =================================================================
def _genSigs(self, mFrqsInx, mAmps, mPhs, nSigs, tS, fR, fRes):
"""
        This function generates the multitone signals using the IFFT algorithm.
Args:
mFrqsInx (matrix): matrix with freqs of tones for all the signals
(as indices of tones in the allowed spectrum)
mAmps (matrix): matrix with amplitudes of tones for all the signals
mPhs (matrix): matrix with phases of tones for all the signals
            nSigs (int): the number of signals
tS (float): time of the signals
fR (float): signal representation sampling frequency
fRes (float): signal spectrum resolution
(distance between the tones in the spectrum)
Returns:
mSig (matrix): matrix with signals (one row - one signal)
mAmPh (float): complex matrix with amplitudes/phases of tones
mFrqs (matrix): matrix with freqs of tones for all the signals
fFFTR (float): signal FFT frequency resolution
"""
# Calculate the number of samples in the signals
nSmp = int(round(tS*fR))
# Calculate the FFT frequency resolution
fFFTR = fR/nSmp
#----------------------------------------------------------------------
# Adjust the amplitudes value to the number of points
mAmpsAdj = mAmps * nSmp/2
# Change phases into radians
mPhsRad = mPhs*np.pi/180
# Generate a one complex matrix for all the signals and its conjugated copy
mAmPh = mAmpsAdj*np.cos(mPhsRad) + 1j*mAmpsAdj*np.sin(mPhsRad)
mAmPh_conj = np.conjugate(mAmPh)
#----------------------------------------------------------------------
# Put the complex matrix with amplitudes and phases of tones into
# one matrix dedicated for IFFT
# Recalculate the matrix with indices of frequencies in the spectrum
# to real frequencies
mFrqs = mFrqsInx*fRes
# Recalculate the matrix with indices of frequencies in the spectrum
# to indices of frequencies in the IFFT transform
mIFFTFrqsInx = np.around(mFrqs/fFFTR).astype(int)
# Allocate the vector for the ifft coefficients for all the signals
# (one signal in one row)
mIFFT = np.zeros((nSigs, nSmp)) + 1j*np.zeros((nSigs, nSmp))
# Put the complex vector with tones values into the IFFT matrix
for inxSig in np.arange(nSigs):
# IFFT indices of tones for the current signal
vInx = mIFFTFrqsInx[inxSig, :]
# Put the tones for the current signal
mIFFT[inxSig, vInx] = mAmPh[inxSig, :]
# IFFT indices of conjugate tones for the current signal
vInxConj = (nSmp - mIFFTFrqsInx[inxSig, :]).astype(int)
# Put the conjugate tones for the current signal
mIFFT[inxSig, vInxConj] = mAmPh_conj[inxSig, :]
#----------------------------------------------------------------------
# Generate the signals (perform the IFFT)
mSig = np.fft.ifftn(mIFFT, axes=[1]).real
return (mSig, mAmPh, mFrqs, fFFTR)
# =================================================================
# Adjust the signal power
# =================================================================
def _adjPower(self, mSig, iP, mAmps, mAmPh):
"""
        This function adjusts powers of the generated signals.
If the requested power of the signals is equal to NaN or inf, then
the signals are not adjusted.
Args:
mSig (matrix): matrix with signals (one row - one signal)
iP (float): requested power of the signals
mAmps (matrix): matrix with amplitudes of tones in the signals
mAmPh (matrix): complex matrix with amplitudes/phases of tones
Returns:
mSig (matrix): matrix with noisy signals
vP (vector): vector with powers of noisy signals
            vPCoef (vector): vector with coefficients which adjusted the signals
mAmps (matrix): matrix with adjusted amplitudes of tones
mAmPh (matrix): complex matrix with adjusted amplitudes/phases
"""
# Get the number of signals and the size of signals (the number of samples)
(nSigs, nSmp) = mSig.shape
# Measure the power of the signals
vP = (np.sum(mSig * mSig, axis=1) / nSmp).reshape(nSigs, 1)
# Adjust the signal power, if needed
        if not (np.isnan(iP) or np.isinf(iP)):
# Compute power adjustments coefficients for the noise signals
vPCoef = np.sqrt(iP / vP)
# Adjust the signal power
mPCoef = np.tile(vPCoef, (1, nSmp))
mSig = mSig * mPCoef
# Adjust the reported amplitudes of tones
(_, nAmps) = mAmps.shape
mPCoef = np.tile(vPCoef, (1, nAmps))
mAmps = mAmps * mPCoef
mAmPh = mAmPh * mPCoef
# Measure the power of the adjusted signals
vP = np.sum(mSig*mSig, axis=1) / nSmp
else:
# Power adjustment coefficients are equal to 1 (no adjustment)
vPCoef = np.ones((nSigs, 1))
return (mSig, vP, vPCoef, mAmps, mAmPh)
# =================================================================
# Add the AWGN noise to the signals
# =================================================================
def _addNoise(self, mSig, vP, iSNR):
"""
This function adds noise to the generated signals.
If the requested level of noise is equal to NaN or inf,
then no noise is added.
Args:
mSig (matrix): matrix with signals (one row - one signal)
vP (vector): vector with powers of signals
iSNR (float): wanted level of noise in the signals
Returns:
mSigNN (matrix): matrix with non noisy signals
vPNN (vector): vector with powers of non noisy signals
mSig (matrix): matrix with noisy signals
vP (vector): vector with powers of noisy signals
"""
# Backup the non noisy signals
mSigNN = mSig.copy() # Matrix with signals
vPNN = vP.copy() # Power of non noisy signals
# Add the noise, if needed
if not (np.isnan(iSNR) or np.isinf(iSNR)):
# Get the number of signals and the size of signals
# (the number of samples)
(nSigs, nSmp) = mSig.shape
# Generate the noise
mNoise = np.random.randn(nSigs, nSmp)
# Measure the current powers of the noise signals
vNoisePReal = (np.sum(mNoise*mNoise, axis=1) / nSmp).reshape(nSigs, 1)
# Compute the requested noise power for every signal
vNoiseP = (vP / (10**(iSNR/10))).reshape(nSigs, 1)
# Compute power adjustments coefficients for the noise signals
            vPNoiseCoef = np.sqrt(vNoiseP / vNoisePReal)
            # Adjust the power of the noise and add the noise to the signals
            mPNoiseCoef = np.tile(vPNoiseCoef, (1, nSmp))
            mSig = mSig + mNoise * mPNoiseCoef
            # Measure the power of the noisy signals
            vP = (np.sum(mSig * mSig, axis=1) / nSmp).reshape(nSigs, 1)
        return (mSigNN, vPNN, mSig, vP)
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)  # log of the golden ratio, i.e. asinh(0.5)
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
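    Examples
    --------
    A minimal check, e.g. using the ADD operator from this module:
        assert_token(ADD, [1, 2], [3])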
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
        assert copy(SQR) is SQR
        assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
        assert copy(SIND) is SIND
        assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
)
# extra stack elements
assert_token(SINH, [0, GOLDEN_RATIO], [0, 0.5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SINH([], {})
class TestCOSHOperator:
def test_repr(self):
assert repr(COSH) == "COSH"
def test_pops(self):
assert COSH.pops == 1
def test_puts(self):
assert COSH.puts == 1
def test_no_copy(self):
assert copy(COSH) is COSH
assert deepcopy(COSH) is COSH
def test_call(self):
assert_token(COSH, [0.0], [1.0], approx=True)
assert_token(COSH, [GOLDEN_RATIO], [math.sqrt(5) / 2], approx=True)
assert_token(
COSH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([1.0, np.sqrt(5) / 2])],
approx=True,
)
# extra stack elements
assert_token(COSH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSH([], {})
class TestTANHOperator:
def test_repr(self):
assert repr(TANH) == "TANH"
def test_pops(self):
assert TANH.pops == 1
def test_puts(self):
assert TANH.puts == 1
def test_no_copy(self):
assert copy(TANH) is TANH
assert deepcopy(TANH) is TANH
def test_call(self):
assert_token(TANH, [0.0], [0.0], approx=True)
assert_token(TANH, [GOLDEN_RATIO], [math.sqrt(5) / 5], approx=True)
assert_token(
TANH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([0.0, np.sqrt(5) / 5])],
approx=True,
)
# extra stack elements
assert_token(TANH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TANH([], {})
class TestASINOperator:
def test_repr(self):
assert repr(ASIN) == "ASIN"
def test_pops(self):
assert ASIN.pops == 1
def test_puts(self):
assert ASIN.puts == 1
def test_no_copy(self):
assert copy(ASIN) is ASIN
assert deepcopy(ASIN) is ASIN
def test_call(self):
assert_token(ASIN, [0.0], [0.0], approx=True)
assert_token(ASIN, [1 / 2], [math.pi / 6], approx=True)
assert_token(ASIN, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ASIN, [math.sqrt(3) / 2], [math.pi / 3], approx=True)
assert_token(ASIN, [1.0], [math.pi / 2], approx=True)
assert_token(
ASIN,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
ASIN,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ASIN, [0, 1.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIN([], {})
class TestACOSOperator:
def test_repr(self):
assert repr(ACOS) == "ACOS"
def test_pops(self):
assert ACOS.pops == 1
def test_puts(self):
assert ACOS.puts == 1
def test_no_copy(self):
assert copy(ACOS) is ACOS
assert deepcopy(ACOS) is ACOS
def test_call(self):
assert_token(ACOS, [1.0], [0.0], approx=True)
assert_token(ACOS, [math.sqrt(3) / 2], [math.pi / 6], approx=True)
assert_token(ACOS, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ACOS, [1 / 2], [math.pi / 3], approx=True)
assert_token(ACOS, [0.0], [math.pi / 2], approx=True)
assert_token(
ACOS,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ACOS, [0, 0.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOS([], {})
class TestATANOperator:
def test_repr(self):
assert repr(ATAN) == "ATAN"
def test_pops(self):
assert ATAN.pops == 1
def test_puts(self):
assert ATAN.puts == 1
def test_no_copy(self):
assert copy(ATAN) is ATAN
assert deepcopy(ATAN) is ATAN
def test_call(self):
assert_token(ATAN, [0.0], [0.0], approx=True)
assert_token(ATAN, [1 / math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN, [1.0], [math.pi / 4], approx=True)
assert_token(ATAN, [math.sqrt(3)], [math.pi / 3], approx=True)
assert_token(
ATAN,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
assert_token(
ATAN,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
# extra stack elements
assert_token(ATAN, [0, 1.0], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN([], {})
class TestASINDOperator:
def test_repr(self):
assert repr(ASIND) == "ASIND"
def test_pops(self):
assert ASIND.pops == 1
def test_puts(self):
assert ASIND.puts == 1
def test_no_copy(self):
assert copy(ASIND) is ASIND
assert deepcopy(ASIND) is ASIND
def test_call(self):
assert_token(ASIND, [0.0], [0], approx=True)
assert_token(ASIND, [1 / 2], [30], approx=True)
assert_token(ASIND, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ASIND, [math.sqrt(3) / 2], [60], approx=True)
assert_token(ASIND, [1.0], [90], approx=True)
assert_token(
ASIND,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
ASIND,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ASIND, [0, 1.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIND([], {})
class TestACOSDOperator:
def test_repr(self):
assert repr(ACOSD) == "ACOSD"
def test_pops(self):
assert ACOSD.pops == 1
def test_puts(self):
assert ACOSD.puts == 1
def test_no_copy(self):
assert copy(ACOSD) is ACOSD
assert deepcopy(ACOSD) is ACOSD
def test_call(self):
assert_token(ACOSD, [1.0], [0], approx=True)
assert_token(ACOSD, [math.sqrt(3) / 2], [30], approx=True)
assert_token(ACOSD, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ACOSD, [1 / 2], [60], approx=True)
assert_token(ACOSD, [0.0], [90], approx=True)
assert_token(
ACOSD,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ACOSD, [0, 0.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSD([], {})
class TestATANDOperator:
def test_repr(self):
assert repr(ATAND) == "ATAND"
def test_pops(self):
assert ATAND.pops == 1
def test_puts(self):
assert ATAND.puts == 1
def test_no_copy(self):
assert copy(ATAND) is ATAND
assert deepcopy(ATAND) is ATAND
def test_call(self):
assert_token(ATAND, [0.0], [0], approx=True)
assert_token(ATAND, [1 / math.sqrt(3)], [30], approx=True)
assert_token(ATAND, [1.0], [45], approx=True)
assert_token(ATAND, [math.sqrt(3)], [60], approx=True)
assert_token(
ATAND,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0, 30, 45, 60])],
approx=True,
)
assert_token(
ATAND,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0, 30, 45, 60])],
approx=True,
)
# extra stack elements
assert_token(ATAND, [0, 1.0], [0, 45], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAND([], {})
class TestASINHOperator:
def test_repr(self):
assert repr(ASINH) == "ASINH"
def test_pops(self):
assert ASINH.pops == 1
def test_puts(self):
assert ASINH.puts == 1
def test_no_copy(self):
assert copy(ASINH) is ASINH
assert deepcopy(ASINH) is ASINH
def test_call(self):
assert_token(ASINH, [0.0], [0.0], approx=True)
assert_token(ASINH, [0.5], [GOLDEN_RATIO], approx=True)
assert_token(
ASINH, [np.array([0.0, 0.5])], [np.array([0.0, GOLDEN_RATIO])], approx=True
)
# extra stack elements
assert_token(ASINH, [0, 0.5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASINH([], {})
class TestACOSHOperator:
def test_repr(self):
assert repr(ACOSH) == "ACOSH"
def test_pops(self):
assert ACOSH.pops == 1
def test_puts(self):
assert ACOSH.puts == 1
def test_no_copy(self):
assert copy(ACOSH) is ACOSH
assert deepcopy(ACOSH) is ACOSH
def test_call(self):
assert_token(ACOSH, [1.0], [0.0], approx=True)
assert_token(ACOSH, [math.sqrt(5) / 2], [GOLDEN_RATIO], approx=True)
assert_token(
ACOSH,
[np.array([1.0, np.sqrt(5) / 2])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ACOSH, [0, math.sqrt(5) / 2], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSH([], {})
class TestATANHOperator:
def test_repr(self):
assert repr(ATANH) == "ATANH"
def test_pops(self):
assert ATANH.pops == 1
def test_puts(self):
assert ATANH.puts == 1
def test_no_copy(self):
assert copy(ATANH) is ATANH
assert deepcopy(ATANH) is ATANH
def test_call(self):
assert_token(ATANH, [0.0], [0.0], approx=True)
assert_token(ATANH, [math.sqrt(5) / 5], [GOLDEN_RATIO], approx=True)
assert_token(
ATANH,
[np.array([0.0, np.sqrt(5) / 5])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ATANH, [0, math.sqrt(5) / 5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATANH([], {})
class TestISNANOperator:
def test_repr(self):
assert repr(ISNAN) == "ISNAN"
def test_pops(self):
assert ISNAN.pops == 1
def test_puts(self):
assert ISNAN.puts == 1
def test_no_copy(self):
assert copy(ISNAN) is ISNAN
assert deepcopy(ISNAN) is ISNAN
def test_call(self):
assert_token(ISNAN, [2], [False])
assert_token(ISNAN, [float("nan")], [True])
assert_token(ISNAN, [np.array([4, np.nan])], [np.array([False, True])])
assert_token(ISNAN, [np.array([np.nan, 1])], [np.array([True, False])])
# extra stack elements
assert_token(ISNAN, [0, float("nan")], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISNAN([], {})
class TestISANOperator:
def test_repr(self):
assert repr(ISAN) == "ISAN"
def test_pops(self):
assert ISAN.pops == 1
def test_puts(self):
assert ISAN.puts == 1
def test_no_copy(self):
assert copy(ISAN) is ISAN
assert deepcopy(ISAN) is ISAN
def test_call(self):
assert_token(ISAN, [2], [True])
assert_token(ISAN, [float("nan")], [False])
assert_token(ISAN, [np.array([4, np.nan])], [np.array([True, False])])
assert_token(ISAN, [np.array([np.nan, 1])], [np.array([False, True])])
# extra stack elements
assert_token(ISAN, [0, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISAN([], {})
class TestRINTOperator:
def test_repr(self):
assert repr(RINT) == "RINT"
def test_pops(self):
assert RINT.pops == 1
def test_puts(self):
assert RINT.puts == 1
def test_no_copy(self):
assert copy(RINT) is RINT
assert deepcopy(RINT) is RINT
def test_call(self):
assert_token(RINT, [1.6], [2])
assert_token(RINT, [2.4], [2])
assert_token(RINT, [-1.6], [-2])
assert_token(RINT, [-2.4], [-2])
assert_token(RINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(RINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(RINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
RINT([], {})
class TestNINTOperator:
def test_repr(self):
assert repr(NINT) == "NINT"
def test_pops(self):
assert NINT.pops == 1
def test_puts(self):
assert NINT.puts == 1
def test_no_copy(self):
assert copy(NINT) is NINT
assert deepcopy(NINT) is NINT
def test_call(self):
assert_token(NINT, [1.6], [2])
assert_token(NINT, [2.4], [2])
assert_token(NINT, [-1.6], [-2])
assert_token(NINT, [-2.4], [-2])
assert_token(NINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(NINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(NINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NINT([], {})
class TestCEILOperator:
def test_repr(self):
assert repr(CEIL) == "CEIL"
def test_pops(self):
assert CEIL.pops == 1
def test_puts(self):
assert CEIL.puts == 1
def test_no_copy(self):
assert copy(CEIL) is CEIL
assert deepcopy(CEIL) is CEIL
def test_call(self):
assert_token(CEIL, [1.6], [2])
assert_token(CEIL, [2.4], [3])
assert_token(CEIL, [-1.6], [-1])
assert_token(CEIL, [-2.4], [-2])
assert_token(CEIL, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEIL, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEIL, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEIL([], {})
class TestCEILINGOperator:
def test_repr(self):
assert repr(CEILING) == "CEILING"
def test_pops(self):
assert CEILING.pops == 1
def test_puts(self):
assert CEILING.puts == 1
def test_no_copy(self):
assert copy(CEILING) is CEILING
assert deepcopy(CEILING) is CEILING
def test_call(self):
assert_token(CEILING, [1.6], [2])
assert_token(CEILING, [2.4], [3])
assert_token(CEILING, [-1.6], [-1])
assert_token(CEILING, [-2.4], [-2])
assert_token(CEILING, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEILING, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEILING, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEILING([], {})
class TestFLOOROperator:
def test_repr(self):
assert repr(FLOOR) == "FLOOR"
def test_pops(self):
assert FLOOR.pops == 1
def test_puts(self):
assert FLOOR.puts == 1
def test_no_copy(self):
assert copy(FLOOR) is FLOOR
assert deepcopy(FLOOR) is FLOOR
def test_call(self):
assert_token(FLOOR, [1.6], [1])
assert_token(FLOOR, [2.4], [2])
assert_token(FLOOR, [-1.6], [-2])
assert_token(FLOOR, [-2.4], [-3])
assert_token(FLOOR, [np.array([1.6, 2.4])], [np.array([1, 2])])
assert_token(FLOOR, [np.array([-1.6, -2.4])], [np.array([-2, -3])])
# extra stack elements
assert_token(FLOOR, [0, 1.8], [0, 1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FLOOR([], {})
class TestD2ROperator:
def test_repr(self):
assert repr(D2R) == "D2R"
def test_pops(self):
assert D2R.pops == 1
def test_puts(self):
assert D2R.puts == 1
def test_no_copy(self):
assert copy(D2R) is D2R
assert deepcopy(D2R) is D2R
def test_call(self):
assert_token(D2R, [0], [0.0], approx=True)
assert_token(D2R, [30], [math.pi / 6], approx=True)
assert_token(D2R, [45], [math.pi / 4], approx=True)
assert_token(D2R, [60], [math.pi / 3], approx=True)
assert_token(D2R, [90], [math.pi / 2], approx=True)
assert_token(
D2R,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
D2R,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(D2R, [0, 90], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
D2R([], {})
class TestR2DOperator:
def test_repr(self):
assert repr(R2D) == "R2D"
def test_pops(self):
assert R2D.pops == 1
def test_puts(self):
assert R2D.puts == 1
def test_no_copy(self):
assert copy(R2D) is R2D
assert deepcopy(R2D) is R2D
def test_call(self):
assert_token(R2D, [0.0], [0], approx=True)
assert_token(R2D, [math.pi / 6], [30], approx=True)
assert_token(R2D, [math.pi / 4], [45], approx=True)
assert_token(R2D, [math.pi / 3], [60], approx=True)
assert_token(R2D, [math.pi / 2], [90], approx=True)
assert_token(
R2D,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
R2D,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(R2D, [0, math.pi / 2], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2D([], {})
class TestYMDHMSOperator:
def test_repr(self):
assert repr(YMDHMS) == "YMDHMS"
def test_pops(self):
assert YMDHMS.pops == 1
def test_puts(self):
assert YMDHMS.puts == 1
def test_no_copy(self):
assert copy(YMDHMS) is YMDHMS
assert deepcopy(YMDHMS) is YMDHMS
def test_call(self):
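        # YMDHMS packs seconds since the 1985-01-01 epoch into a
        # YYMMDDHHMMSS.ffffff float, e.g. 2008-07-04 12:19:19.570865
        # becomes 80704121919.570865.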
epoch = datetime(1985, 1, 1, 0, 0, 0, 0)
date1 = datetime(2008, 7, 4, 12, 19, 19, 570865)
date2 = datetime(2019, 6, 26, 12, 31, 6, 930575)
seconds1 = (date1 - epoch).total_seconds()
seconds2 = (date2 - epoch).total_seconds()
assert_token(YMDHMS, [seconds1], [80704121919.570865], approx=True)
assert_token(YMDHMS, [seconds2], [190626123106.930575], approx=True)
assert_token(
YMDHMS,
[np.array([seconds1, seconds2])],
[np.array([80704121919.570865, 190626123106.930575])],
approx=True,
)
# extra stack elements
assert_token(YMDHMS, [0, seconds1], [0, 80704121919.570865], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
YMDHMS([], {})
class TestSUMOperator:
def test_repr(self):
assert repr(SUM) == "SUM"
def test_pops(self):
assert SUM.pops == 1
def test_puts(self):
assert SUM.puts == 1
def test_no_copy(self):
assert copy(SUM) is SUM
assert deepcopy(SUM) is SUM
def test_call(self):
assert_token(SUM, [2], [2])
assert_token(SUM, [-2], [-2])
assert_token(SUM, [float("nan")], [0])
assert_token(SUM, [np.array([4, -1])], [3])
assert_token(SUM, [np.array([-4, 1])], [-3])
assert_token(SUM, [np.array([1, np.nan, 3])], [4])
assert_token(SUM, [np.array([np.nan])], [0])
# extra stack elements
assert_token(SUM, [0, 2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUM([], {})
class TestDIFOperator:
def test_repr(self):
assert repr(DIF) == "DIF"
def test_pops(self):
assert DIF.pops == 1
def test_puts(self):
assert DIF.puts == 1
def test_no_copy(self):
assert copy(DIF) is DIF
assert deepcopy(DIF) is DIF
def test_call(self):
assert_token(DIF, [2], [np.array([np.nan])])
assert_token(DIF, [np.array([1, 2])], [np.array([np.nan, 1])])
assert_token(DIF, [np.array([1, 2, 5])], [np.array([np.nan, 1, 3])])
assert_token(
DIF, [np.array([1, np.nan, 5])], [np.array([np.nan, np.nan, np.nan])]
)
# extra stack elements
assert_token(DIF, [0, 2], [0, np.array([np.nan])])
with pytest.raises(StackUnderflowError):
DIF([], {})
class TestDUPOperator:
def test_repr(self):
assert repr(DUP) == "DUP"
def test_pops(self):
assert DUP.pops == 1
def test_puts(self):
assert DUP.puts == 2
def test_no_copy(self):
assert copy(DUP) is DUP
assert deepcopy(DUP) is DUP
def test_call(self):
assert_token(DUP, [2], [2, 2])
assert_token(DUP, [np.array([4, -1])], [np.array([4, -1]), np.array([4, -1])])
# extra stack elements
assert_token(DUP, [0, 2], [0, 2, 2])
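        # not enough stack elements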
with pytest.raises(StackUnderflowError):
DUP([], {})
class TestDIVOperator:
def test_repr(self):
assert repr(DIV) == "DIV"
def test_pops(self):
assert DIV.pops == 2
def test_puts(self):
assert DIV.puts == 1
def test_no_copy(self):
assert copy(DIV) is DIV
assert deepcopy(DIV) is DIV
def test_call(self):
assert_token(DIV, [10, 2], [5])
assert_token(DIV, [10, np.array([2, 5])], [np.array([5, 2])])
assert_token(DIV, [np.array([10, 4]), 2], [np.array([5, 2])])
assert_token(DIV, [np.array([8, 16]), np.array([2, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(DIV, [0, 10, 2], [0, 5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
DIV([], {})
with pytest.raises(StackUnderflowError):
DIV([1], {})
class TestPOWOperator:
def test_repr(self):
assert repr(POW) == "POW"
def test_pops(self):
assert POW.pops == 2
def test_puts(self):
assert POW.puts == 1
def test_no_copy(self):
assert copy(POW) is POW
assert deepcopy(POW) is POW
def test_call(self):
assert_token(POW, [1, 2], [1])
assert_token(POW, [2, 2], [4])
assert_token(POW, [2, 4], [16])
assert_token(POW, [2, np.array([1, 2, 3])], [np.array([2, 4, 8])])
assert_token(POW, [np.array([1, 2, 3]), 2], [np.array([1, 4, 9])])
assert_token(POW, [np.array([2, 3]), np.array([5, 6])], [np.array([32, 729])])
# extra stack elements
assert_token(POW, [0, 2, 4], [0, 16])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POW([], {})
with pytest.raises(StackUnderflowError):
POW([1], {})
class TestFMODOperator:
    def test_repr(self):
        assert repr(FMOD) == "FMOD"
    def test_pops(self):
        assert FMOD.pops == 2
    def test_puts(self):
        assert FMOD.puts == 1
def test_no_copy(self):
assert copy(FMOD) is FMOD
assert deepcopy(FMOD) is FMOD
def test_call(self):
assert_token(FMOD, [1, 2], [1])
assert_token(FMOD, [2, 10], [2])
assert_token(FMOD, [12, 10], [2])
assert_token(FMOD, [13, np.array([10, 100])], [np.array([3, 13])])
assert_token(FMOD, [np.array([7, 15]), 10], [np.array([7, 5])])
assert_token(FMOD, [np.array([7, 15]), np.array([10, 5])], [np.array([7, 0])])
# extra stack elements
assert_token(FMOD, [0, 12, 10], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FMOD([], {})
with pytest.raises(StackUnderflowError):
FMOD([1], {})
class TestMINOperator:
def test_repr(self):
assert repr(MIN) == "MIN"
def test_pops(self):
assert MIN.pops == 2
def test_puts(self):
assert MIN.puts == 1
def test_no_copy(self):
assert copy(MIN) is MIN
assert deepcopy(MIN) is MIN
def test_call(self):
assert_token(MIN, [2, 3], [2])
assert_token(MIN, [3, 2], [2])
assert_token(MIN, [2, np.array([1, 3])], [np.array([1, 2])])
assert_token(MIN, [np.array([1, 3]), 2], [np.array([1, 2])])
assert_token(MIN, [np.array([2, 3]), np.array([3, 2])], [np.array([2, 2])])
        # extra stack elements
assert_token(MIN, [0, 2, 3], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MIN([], {})
with pytest.raises(StackUnderflowError):
MIN([1], {})
class TestMAXOperator:
def test_repr(self):
assert repr(MAX) == "MAX"
def test_pops(self):
assert MAX.pops == 2
def test_puts(self):
assert MAX.puts == 1
def test_no_copy(self):
assert copy(MAX) is MAX
assert deepcopy(MAX) is MAX
def test_call(self):
assert_token(MAX, [2, 3], [3])
assert_token(MAX, [3, 2], [3])
assert_token(MAX, [2, np.array([1, 3])], [np.array([2, 3])])
assert_token(MAX, [np.array([1, 3]), 2], [np.array([2, 3])])
assert_token(MAX, [np.array([2, 3]), np.array([3, 2])], [np.array([3, 3])])
        # extra stack elements
assert_token(MAX, [0, 2, 3], [0, 3])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MAX([], {})
with pytest.raises(StackUnderflowError):
MAX([1], {})
class TestATAN2Operator:
def test_repr(self):
assert repr(ATAN2) == "ATAN2"
def test_pops(self):
assert ATAN2.pops == 2
def test_puts(self):
assert ATAN2.puts == 1
def test_no_copy(self):
assert copy(ATAN2) is ATAN2
assert deepcopy(ATAN2) is ATAN2
def test_call(self):
# NOTE: second parameter is x, first is y
assert_token(ATAN2, [0, 1], [0], approx=True)
assert_token(ATAN2, [1, math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN2, [1, 1], [math.pi / 4], approx=True)
assert_token(ATAN2, [math.sqrt(3), 1], [math.pi / 3], approx=True)
assert_token(ATAN2, [1, 0], [math.pi / 2], approx=True)
assert_token(
ATAN2, [math.sqrt(3), -1], [math.pi / 2 + math.pi / 6], approx=True
)
assert_token(ATAN2, [1, -1], [math.pi / 2 + math.pi / 4], approx=True)
assert_token(
ATAN2, [1, -math.sqrt(3)], [math.pi / 2 + math.pi / 3], approx=True
)
assert_token(ATAN2, [0, -1], [math.pi / 2 + math.pi / 2], approx=True)
assert_token(
ATAN2,
[
np.array([0, 1, 1, np.sqrt(3), 1, np.sqrt(3), 1, 1, 0]),
np.array([1, np.sqrt(3), 1, 1, 0, -1, -1, -np.sqrt(3), -1]),
],
[
np.array(
[
0.0,
np.pi / 6,
np.pi / 4,
np.pi / 3,
np.pi / 2,
np.pi / 2 + np.pi / 6,
np.pi / 2 + np.pi / 4,
np.pi / 2 + np.pi / 3,
np.pi / 2 + np.pi / 2,
]
)
],
approx=True,
)
# extra stack elements
assert_token(ATAN2, [0, 1, 1], [0, math.pi / 4], approx=True)
# not enough stack elements
        with pytest.raises(StackUnderflowError):
            ATAN2([], {})
        with pytest.raises(StackUnderflowError):
            ATAN2([1], {})
class TestHYPOTOperator:
def test_repr(self):
assert repr(HYPOT) == "HYPOT"
def test_pops(self):
assert HYPOT.pops == 2
def test_puts(self):
assert HYPOT.puts == 1
def test_no_copy(self):
assert copy(HYPOT) is HYPOT
assert deepcopy(HYPOT) is HYPOT
def test_call(self):
assert_token(HYPOT, [1, 1], [math.sqrt(2)], approx=True)
assert_token(HYPOT, [math.sqrt(3), 1], [2], approx=True)
assert_token(
HYPOT,
[1, np.array([np.sqrt(3), 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), 1],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), np.array([1, 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
# extra stack elements
assert_token(HYPOT, [0, math.sqrt(3), 1], [0, 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
HYPOT([], {})
with pytest.raises(StackUnderflowError):
HYPOT([1], {})
class TestR2Operator:
def test_repr(self):
assert repr(R2) == "R2"
def test_pops(self):
assert R2.pops == 2
def test_puts(self):
assert R2.puts == 1
def test_no_copy(self):
assert copy(R2) is R2
assert deepcopy(R2) is R2
def test_call(self):
assert_token(R2, [2, 3], [13])
assert_token(R2, [2, np.array([3, 4])], [np.array([13, 20])])
assert_token(R2, [np.array([3, 4]), 2], [np.array([13, 20])])
assert_token(R2, [np.array([1, 2]), np.array([3, 4])], [np.array([10, 20])])
# extra stack elements
assert_token(R2, [0, 2, 3], [0, 13], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2([], {})
with pytest.raises(StackUnderflowError):
R2([1], {})
class TestEQOperator:
def test_repr(self):
assert repr(EQ) == "EQ"
def test_pops(self):
assert EQ.pops == 2
def test_puts(self):
assert EQ.puts == 1
def test_no_copy(self):
assert copy(EQ) is EQ
assert deepcopy(EQ) is EQ
def test_call(self):
assert_token(EQ, [2, 2], [True])
assert_token(EQ, [2, 3], [False])
assert_token(
EQ, [2, np.array([1, np.nan, 2])], [np.array([False, False, True])]
)
assert_token(
EQ, [np.array([1, np.nan, 2]), 2], [np.array([False, False, True])]
)
assert_token(
EQ,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([True, False, False, True])],
)
# extra stack elements
assert_token(EQ, [0, 2, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
EQ([], {})
with pytest.raises(StackUnderflowError):
EQ([1], {})
class TestNEOperator:
def test_repr(self):
assert repr(NE) == "NE"
def test_pops(self):
assert NE.pops == 2
def test_puts(self):
assert NE.puts == 1
def test_no_copy(self):
assert copy(NE) is NE
assert deepcopy(NE) is NE
def test_call(self):
assert_token(NE, [2, 2], [False])
assert_token(NE, [2, 3], [True])
assert_token(NE, [2, np.array([1, np.nan, 2])], [np.array([True, True, False])])
assert_token(NE, [np.array([1, np.nan, 2]), 2], [np.array([True, True, False])])
assert_token(
NE,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([False, True, True, False])],
)
# extra stack elements
assert_token(NE, [0, 2, 2], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NE([], {})
with pytest.raises(StackUnderflowError):
NE([1], {})
class TestLTOperator:
def test_repr(self):
assert repr(LT) == "LT"
def test_pops(self):
assert LT.pops == 2
def test_puts(self):
assert LT.puts == 1
def test_no_copy(self):
assert copy(LT) is LT
assert deepcopy(LT) is LT
def test_call(self):
assert_token(LT, [2, 3], [True])
assert_token(LT, [2, 2], [False])
assert_token(LT, [3, 2], [False])
assert_token(LT, [2, np.array([1, 2, 3])], [np.array([False, False, True])])
assert_token(LT, [np.array([1, 2, 3]), 2], [np.array([True, False, False])])
assert_token(
LT,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, False, False])],
)
# extra stack elements
assert_token(LT, [0, 2, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
LT([], {})
with pytest.raises(StackUnderflowError):
LT([1], {})
class TestLEOperator:
def test_repr(self):
assert repr(LE) == "LE"
def test_pops(self):
assert LE.pops == 2
def test_puts(self):
assert LE.puts == 1
def test_no_copy(self):
assert copy(LE) is LE
assert deepcopy(LE) is LE
    def test_call(self):
assert_token(LE, [2, 3], [True])
assert_token(LE, [2, 2], [True])
assert_token(LE, [3, 2], [False])
assert_token(LE, [2, np.array([1, 2, 3])], [np.array([False, True, True])])
assert_token(LE, [np.array([1, 2, 3]), 2], [np.array([True, True, False])])
assert_token(
LE,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, True, False])],
)
        # extra stack elements
assert_token(LE, [0, 2, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
LE([], {})
with pytest.raises(StackUnderflowError):
LE([1], {})
class TestGTOperator:
def test_repr(self):
assert repr(GT) == "GT"
def test_pops(self):
assert GT.pops == 2
def test_puts(self):
assert GT.puts == 1
def test_no_copy(self):
assert copy(GT) is GT
assert deepcopy(GT) is GT
def test_call(self):
assert_token(GT, [2, 3], [False])
assert_token(GT, [2, 2], [False])
assert_token(GT, [3, 2], [True])
assert_token(GT, [2, np.array([1, 2, 3])], [np.array([True, False, False])])
assert_token(GT, [np.array([1, 2, 3]), 2], [np.array([False, False, True])])
assert_token(
GT,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([False, False, True])],
)
# extra stack elements
assert_token(GT, [0, 2, 3], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
GT([], {})
with pytest.raises(StackUnderflowError):
GT([1], {})
class TestGEOperator:
def test_repr(self):
assert repr(GE) == "GE"
def test_pops(self):
assert GE.pops == 2
def test_puts(self):
assert GE.puts == 1
def test_no_copy(self):
assert copy(GE) is GE
assert deepcopy(GE) is GE
def test_call(self):
assert_token(GE, [2, 3], [False])
assert_token(GE, [2, 2], [True])
assert_token(GE, [3, 2], [True])
assert_token(GE, [2, np.array([1, 2, 3])], [np.array([True, True, False])])
assert_token(GE, [np.array([1, 2, 3]), 2], [np.array([False, True, True])])
assert_token(
GE,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([False, True, True])],
)
# extra stack elements
assert_token(GE, [0, 2, 3], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
GE([], {})
with pytest.raises(StackUnderflowError):
GE([1], {})
class TestNANOperator:
def test_repr(self):
assert repr(NAN) == "NAN"
def test_pops(self):
assert NAN.pops == 2
def test_puts(self):
assert NAN.puts == 1
def test_no_copy(self):
assert copy(NAN) is NAN
assert deepcopy(NAN) is NAN
def test_call(self):
assert_token(NAN, [2, 2], [float("nan")])
assert_token(NAN, [2, 3], [2])
assert_token(NAN, [2, np.array([2, 3])], [np.array([np.nan, 2])])
assert_token(NAN, [np.array([2, 3]), 2], [np.array([np.nan, 3])])
assert_token(
NAN, [np.array([1, 2, 3]), np.array([3, 2, 1])], [np.array([1, np.nan, 3])]
)
# as float
assert_token(
NAN,
[np.array([1.0, 2.0, 3.0]), np.array([3, 2, 1])],
[np.array([1, np.nan, 3])],
approx=True,
)
# extra stack elements
assert_token(NAN, [0, 2, 2], [0, float("nan")])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NAN([], {})
with pytest.raises(StackUnderflowError):
NAN([1], {})
class TestANDOperator:
def test_repr(self):
assert repr(AND) == "AND"
def test_pops(self):
assert AND.pops == 2
def test_puts(self):
assert AND.puts == 1
def test_no_copy(self):
assert copy(AND) is AND
assert deepcopy(AND) is AND
def test_call(self):
assert_token(AND, [2, 3], [2])
assert_token(AND, [float("nan"), 3], [3])
assert_token(AND, [float("nan"), np.array([2, 3])], [np.array([2, 3])])
assert_token(AND, [np.array([np.nan, 3]), 2], [np.array([2, 3])])
assert_token(
AND,
[np.array([10, np.nan, 30]), np.array([1, 2, 3])],
[np.array([10, 2, 30])],
)
# extra stack elements
assert_token(AND, [0, float("nan"), 3], [0, 3])
# not enough stack elements
with pytest.raises(StackUnderflowError):
AND([], {})
with pytest.raises(StackUnderflowError):
AND([1], {})
class TestOROperator:
def test_repr(self):
assert repr(OR) == "OR"
def test_pops(self):
assert OR.pops == 2
def test_puts(self):
assert OR.puts == 1
def test_no_copy(self):
assert copy(OR) is OR
assert deepcopy(OR) is OR
def test_call(self):
assert_token(OR, [2, 3], [2])
assert_token(OR, [2, float("nan")], [float("nan")])
assert_token(OR, [2, np.array([3, np.nan])], [np.array([2, np.nan])])
assert_token(OR, [np.array([2, 3]), np.nan], [np.array([np.nan, np.nan])])
assert_token(
OR,
[np.array([1, 2, 3]), np.array([10, np.nan, 30])],
[np.array([1, np.nan, 3])],
)
# as float
assert_token(
OR,
[np.array([1.0, 2.0, 3.0]), np.array([10, np.nan, 30])],
[np.array([1, np.nan, 3])],
)
# extra stack elements
assert_token(OR, [0, 2, float("nan")], [0, float("nan")])
# not enough stack elements
with pytest.raises(StackUnderflowError):
OR([], {})
with pytest.raises(StackUnderflowError):
OR([1], {})
class TestIANDOperator:
def test_repr(self):
assert repr(IAND) == "IAND"
def test_pops(self):
assert IAND.pops == 2
def test_puts(self):
assert IAND.puts == 1
def test_no_copy(self):
assert copy(IAND) is IAND
assert deepcopy(IAND) is IAND
def test_call(self):
assert_token(IAND, [5, 3], [1])
assert_token(IAND, [15, 21], [5])
assert_token(IAND, [21, 15], [5])
assert_token(IAND, [15, np.array([9, 21, 35])], [np.array([9, 5, 3])])
assert_token(IAND, [np.array([9, 21, 35]), 15], [np.array([9, 5, 3])])
assert_token(
IAND,
[np.array([9, 21, 35]), np.array([3, 15, 127])],
[np.array([1, 5, 35])],
)
# extra stack elements
assert_token(IAND, [0, 15, 21], [0, 5])
# floats are not supported
with pytest.raises(TypeError):
IAND([1.0, 2], {})
with pytest.raises(TypeError):
IAND([1, 2.0], {})
with pytest.raises(TypeError):
IAND([1, np.array([2.0, 3.0])], {})
with pytest.raises(TypeError):
IAND([np.array([2.0, 3.0]), 1], {})
# not enough stack elements
with pytest.raises(StackUnderflowError):
IAND([], {})
with pytest.raises(StackUnderflowError):
IAND([1], {})
class TestIOROperator:
def test_repr(self):
assert repr(IOR) == "IOR"
def test_pops(self):
assert IOR.pops == 2
def test_puts(self):
assert IOR.puts == 1
def test_no_copy(self):
assert copy(IOR) is IOR
assert deepcopy(IOR) is IOR
def test_call(self):
assert_token(IOR, [5, 3], [7])
assert_token(IOR, [15, 21], [31])
assert_token(IOR, [21, 15], [31])
        assert_token(IOR, [15, np.array([9, 21, 35])], [np.array([15, 31, 47])])
#! /usr/bin/env python
from __future__ import division
from builtins import range
from LLC_Membranes.llclib import file_rw, transform, topology
import mdtraj as md
import numpy as np
import matplotlib.path as mplPath
from random import randint
import tqdm
class region:
"""
Define a region as an extrusion of a polygon in the z direction
"""
def __init__(self, sides):
"""
:param sides: number of sides making up the region in the xy plane
:return: region
"""
self.sides = sides
    def xyregion(self, corners):
        """
        :param corners: points defining the corners of the polygon making up the xy region
        :return: a matplotlib Path tracing the region defined by corners
        """
        path = mplPath.Path(corners)
        return path
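# A minimal usage sketch (hypothetical points) for the region class above: build a
# unit-square region and test which points fall inside it.
def _region_example():
    corners = [(0, 0), (1, 0), (1, 1), (0, 1)]
    path = region(4).xyregion(corners)
    return path.contains_points([(0.5, 0.5), (2.0, 2.0)])  # -> array([True, False])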
def thickness(filename, ref_atoms, grid, *traj, **kwargs):
"""
:param filename: name of .gro file
    :param ref_atoms: atoms on which the thickness will be based
    :param grid: if True, estimate the thickness on an xy grid and bootstrap the statistics
    :param traj: trajectory of positions
    :param kwargs: grid options: 'grid_res' (grid resolution) and 'exclude' (residue name to exclude)
:return: trajectory of thicknesses or single thickness based on max/min z coordinate of reference atoms
"""
if traj:
        traj = np.asarray(traj)[0]  # optional arguments of the form *args need to be converted back to numpy arrays
nT = traj.shape[0] # number of trajectory points
thick = np.zeros([nT])
z_max = np.zeros([nT])
z_min = np.zeros([nT])
thick_std = np.zeros([nT])
for t in range(nT):
z_max_t = max(traj[t, :, 2])
z_min_t = min(traj[t, :, 2])
thick[t] = z_max_t - z_min_t
z_max[t] = z_max_t
z_min[t] = z_min_t
else:
f = open(filename, "r") # .gro file whose positions of Na ions will be read
a = [] # list to hold lines of file
for line in f:
a.append(line)
line = 0
while a[line].count('HII') == 0:
line += 1
if grid:
t = md.load(filename)
pos = t.xyz[0, :, :] # positions of all atoms
            if kwargs.get('exclude'):  # optionally exclude a residue by name
keep = [a.index for a in t.topology.atoms if a.residue.name != kwargs['exclude']]
pos = t.atom_slice(keep).xyz[0, :, :]
# define boundaries of each grid area
grid_res = kwargs['grid_res']
nregions = (grid_res - 1) ** 2
g = np.zeros([2, grid_res, grid_res])
            dims = a[-1].split()
            xbox = np.linalg.norm([float(dims[0]), float(dims[3]), float(dims[4])])
            ybox = np.linalg.norm([float(dims[1]), float(dims[5]), float(dims[6])])
yangle = np.arctan(float(dims[1])/abs(float(dims[5])))
for i in range(grid_res):
g[0, i, :] = np.linspace(0, xbox, grid_res) + (float(i) / grid_res)*float(dims[5])
g[1, :, i] = np.linspace(0, ybox, grid_res)*np.sin(yangle)
corners = np.zeros([nregions, 4, 2])
zmaxes = np.zeros([nregions])
zmins = np.zeros([nregions])
thicks = np.zeros([nregions])
for i in range(grid_res - 1):
for j in range(grid_res - 1):
# define corners of grid region
r = i*(grid_res - 1) + j
corners[r, 0, :] = g[:, i, j]
corners[r, 1, :] = g[:, i + 1, j]
corners[r, 2, :] = g[:, i + 1, j + 1]
corners[r, 3, :] = g[:, i, j + 1]
# create a region using the corners (corners need to be traced in order)
path = mplPath.Path(corners[r, :, :])
contained = path.contains_points(pos[:, :2]) # check whether each point is in the region
z = pos[np.where(contained), 2] # get the z position of all atoms contained in the region
zmaxes[r] = np.max(z)
zmins[r] = np.min(z)
thicks[r] = zmaxes[r] - zmins[r]
# bootstrap to get statistics
nboot = 2000
vmax = np.zeros([nboot])
vmin = np.zeros([nboot])
for i in range(nboot):
imax = randint(0, nregions - 1)
imin = randint(0, nregions - 1)
vmax[i] = zmaxes[imax]
vmin[i] = zmins[imin]
z_max = np.mean(vmax)
z_min = np.mean(vmin)
thick = np.mean(vmax - vmin)
thick_std = np.std(vmax - vmin)
else:
z = [] # list to hold z positions of all atoms
while a[line].count('HII') != 0:
if str.strip(a[line][11:15]) in ref_atoms:
z.append(float(a[line][36:44]))
line += 1
z_max = max(z)
z_min = min(z)
thick = z_max - z_min
thick_std = 0
return thick, z_max, z_min, thick_std
def conc(t, comp, b):
"""
Calculate the concentration of the specified component
:param t: mdtraj trajectory object for system being studied
:param comp: component which you want the concentration of
:param b: buffer. distance into membrane to go before starting calculation
:return: concentration
"""
from pymbar import timeseries
box = t.unitcell_vectors
equil = timeseries.detectEquilibration(box[:, 2, 2])[0]
thick = np.mean(box[equil:, 2, 2])
z_max = thick
z_min = 0
buffer = thick*b
z_max -= buffer
z_min += buffer
thick = z_max - z_min
# Calculate concentration (an average of all frames)
keep = [a.index for a in t.topology.atoms if a.name == comp]
t_comp = t.atom_slice(keep)
pos = t_comp.xyz
ncomp = pos.shape[1] # number of components in the simulation which you want the concentration of
nT = pos.shape[0]
if b > 0:
count = np.zeros([nT])
box_vol = np.zeros([nT])
cross = np.zeros([nT])
for t in range(nT):
x_dim = np.linalg.norm(box[t, 0, :])
y_dim = np.linalg.norm(box[t, 1, :])
cross[t] = x_dim*y_dim
box_vol[t] = x_dim*y_dim*thick
for c in range(ncomp):
if z_max >= pos[t, c, 2] >= z_min:
count[t] += 1
else:
count = ncomp*np.ones([nT])
box_vol = np.zeros([nT])
cross = np.zeros([nT])
for t in range(nT):
x_dim = np.linalg.norm(box[t, 0, :])
y_dim = np.linalg.norm(box[t, 1, :])
cross[t] = x_dim*y_dim
box_vol[t] = x_dim*y_dim*thick
factor = 1 / (1*10**-27) # convert from ions/nm^3 to ions/m^3
conc = np.zeros([nT])
for c in range(nT):
conc[c] = (count[c] / box_vol[c]) * factor
avg_conc = np.mean(conc)
std = np.std(conc)
avg_cross = np.mean(cross)
return avg_conc, std, avg_cross, thick, z_max, z_min
def avg_pore_loc(npores, pos, box, buffer=0, spline=False, npts=20, progress=False, bins=False, spline_name='spline.pl'):
""" Calculate average pore location for each pore at each frame
    :param npores: the number of pores in the unit cell
    :param pos: the coordinates of the component(s) which you are using to locate the pore centers
    :param box: box vectors (t.unitcell_vectors when the trajectory is loaded with mdtraj)
:param buffer: fraction (of membrane thickness) of top and bottom of membrane to exclude from p2p calculations
:param spline: trace pore centers with a spline
:param npts: number of points making up the spline in each pore
:param progress: show progress bar while constructing splines
:param bins: return the bin centers of each spline for plotting purposes
:param spline_name: name of spline. Include absolute path if not in same directory where script was run
    :type npores: int
:type pos: numpy.ndarray, shape(ncomponents, 3) or numpy.ndarray, shape(nframes, ncomponents, 3)
:type buffer: float
:type spline: bool
:type box: numpy.ndarray, shape(nframes, 3, 3)
:type npts: int
:type progress: bool
:type bins: bool
:type spline_name: str
:return: numpy array containing the x, y coordinates of the center of each pore at each frame
"""
# Find the average location of the pores w.r.t. x and y
if spline:
if box is None:
            raise ValueError('You must supply box vectors if you are to trace the pores with a spline')
else:
print('Calculating pore spline...')
centers, bin_centers = trace_pores(pos, box, npts, npores=4, progress=progress, savename=spline_name)
if bins:
return centers, bin_centers
else:
return centers
else:
if len(pos.shape) == 3: # multiple frames
nT = np.shape(pos)[0]
comp_ppore = np.shape(pos)[1] // npores
p_center = np.zeros([nT, npores, 2])
for i in range(nT):
positions = wrap_box(pos[i, ...], box[i, ...])
                if buffer > 0:
                    include = np.full(pos.shape[1], True)
                    include[np.where(pos[i, :, 2] > box[i, 2, 2] - buffer)] = False  # exclude the top buffer region
                    include[np.where(pos[i, :, 2] < buffer)] = False  # exclude the bottom buffer region
                    for j in range(npores):
                        count = 0
                        for k in range(comp_ppore * j, comp_ppore * (j + 1)):
                            if include[k]:
                                p_center[i, j, :] += positions[k, :2]
                                count += 1
                        p_center[i, j, :] /= count  # take the average over included atoms
else:
for j in range(npores):
p_center[i, j, :] = positions[comp_ppore*j:comp_ppore*(j + 1), :2].mean(axis=0)
elif len(pos.shape) == 2: # single frame
comp_ppore = pos.shape[0] // npores
p_center = np.zeros([npores, 2])
for j in range(npores):
for k in range(comp_ppore*j, comp_ppore*(j + 1)):
p_center[j, :] += pos[k, :2]
p_center[j, :] /= comp_ppore
        else:
            raise ValueError('Please use a position array with valid dimensions')
return p_center
def p2p(p_centers, distances):
"""
    :param p_centers: the x, y locations of the pore centers, shaped (2, npores, nT)
:param distances: the number of distinct distances between pores
:return: all of the pore to pore distances
"""
nT = np.shape(p_centers)[2]
p2ps = np.zeros([distances, nT]) # distances in the order 1-2, 1-3, 1-4, 2-3, 2-4, 3-4
for i in range(nT):
# So ugly ... sadness :(
p2ps[0, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 1, i])
p2ps[1, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 2, i])
p2ps[2, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 3, i])
p2ps[3, i] = np.linalg.norm(p_centers[:, 1, i] - p_centers[:, 2, i])
p2ps[4, i] = np.linalg.norm(p_centers[:, 1, i] - p_centers[:, 3, i])
p2ps[5, i] = np.linalg.norm(p_centers[:, 2, i] - p_centers[:, 3, i])
return p2ps
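# A hedged alternative sketch for p2p: the same pairwise distances computed with
# itertools.combinations instead of the hard-coded four-pore indexing above, so it
# generalizes to any number of pores (pair order matches p2p for npores=4).
def _p2p_generalized(p_centers):
    from itertools import combinations
    nT = np.shape(p_centers)[2]
    pairs = list(combinations(range(p_centers.shape[1]), 2))  # (0,1), (0,2), ...
    p2ps = np.zeros([len(pairs), nT])
    for i in range(nT):
        for k, (a, b) in enumerate(pairs):
            p2ps[k, i] = np.linalg.norm(p_centers[:, a, i] - p_centers[:, b, i])
    return p2ps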
def limits(pos, pcenters):
"""
    Estimate the pore 'radius' based on the position of some component and its maximum deviation from the pore center
    :param pos: the positions of all atoms included in making the estimate
    :param pcenters: the x, y positions of the pore centers for each frame
    :return: an approximate pore radius, beyond which we have entered the alkane region
"""
nT = pcenters.shape[0]
npores = pcenters.shape[1]
natoms = pos.shape[1]
atom_ppore = natoms // npores
deviation = np.zeros([nT, npores, atom_ppore])
for f in tqdm.tqdm(range(nT)):
for i in range(atom_ppore):
for j in range(npores):
deviation[f, j, i] = np.linalg.norm(pos[f, j*atom_ppore + i, :2] - pcenters[f, j, :])
radii = np.zeros([nT, npores])
for t in range(nT):
for p in range(npores):
radii[t, p] = np.mean(deviation[t, p, :])
return radii
def put_in_box(pt, x_box, y_box, m, angle):
"""
:param pt: The point to place back in the box
:param x_box: length of box in x dimension
:param y_box: length of box in y dimension
:param m: slope of box vector
:param angle: angle between x axis and y box vector
:return: coordinate shifted into box
"""
b = - m * x_box # y intercept of box vector that does not pass through origin (right side of box)
if pt[1] < 0:
pt[:2] += [np.cos(angle)*x_box, np.sin(angle)*x_box] # if the point is under the box
if pt[1] > y_box:
pt[:2] -= [np.cos(angle)*x_box, np.sin(angle)*x_box]
if pt[1] > m*pt[0]: # if the point is on the left side of the box
pt[0] += x_box
if pt[1] < (m*pt[0] + b): # if the point is on the right side of the box
pt[0] -= x_box
return pt
def trace_pores(pos, box, npoints, npores=4, progress=True, save=True, savename='spline.pl'):
"""
Find the line which traces through the center of the pores
:param pos: positions of atoms used to define pore location (args.ref) [natoms, 3]
:param box: xy box vectors, [2, 2], mdtraj format (t.unitcell_vectors)
:param npoints: number of points for spline in each pore
:param npores: number of pores in unit cell (assumed that atoms are number sequentially by pore. i.e. pore 1 atom
numbers all precede those in pore 2)
:param progress: set to True if you want a progress bar to be shown
:param save: save spline as pickled object
:param savename: path to spline. If absolute path is not provided, will look in current directory
:type pos: np.ndarray
:type box: np.ndarray
:type npoints: int
:type npores: int
:type progress: bool
:type save: bool
:type savename: str
:return: points which trace the pore center
"""
try:
print('Attempting to load spline ... ', end='', flush=True)
spline = file_rw.load_object(savename)
print('Success!')
return spline[0], spline[1]
except FileNotFoundError:
print('%s not found ... Calculating spline' % savename)
single_frame = False
    if pos.ndim == 2:
pos = pos[np.newaxis, ...] # add a new axis if we are looking at a single frame
box = box[np.newaxis, ...]
single_frame = True
nframes = pos.shape[0]
atoms_p_pore = int(pos.shape[1] / npores) # atoms in each pore
v = np.zeros([nframes, 4, 2]) # vertices of unitcell box
bounds = []
v[:, 0, :] = [0, 0]
v[:, 1, 0] = box[:, 0, 0]
v[:, 3, :] = np.vstack((box[:, 1, 0], box[:, 1, 1])).T
v[:, 2, :] = v[:, 3, :] + np.vstack((box[:, 0, 0], np.zeros([nframes]))).T
center = np.vstack((np.mean(v[..., 0], axis=1), np.mean(v[..., 1], axis=1), np.zeros(nframes))).T
for t in range(nframes):
bounds.append(mplPath.Path(v[t, ...])) # create a path tracing the vertices, v
angle = np.arcsin(box[:, 1, 1]/box[:, 0, 0]) # specific to case where magnitude of x and y box lengths are equal
angle = np.where(box[:, 1, 0] < 0, angle + np.pi / 2, angle) # haven't tested this well yet
m = (v[:, 3, 1] - v[:, 0, 1]) / (v[:, 3, 0] - v[:, 0, 0]) # slope from points connecting first and third vertices
centers = np.zeros([nframes, npores, npoints, 3])
bin_centers = np.zeros([nframes, npores, npoints])
for t in tqdm.tqdm(range(nframes), disable=(not progress)):
for p in range(npores):
pore = pos[t, p*atoms_p_pore:(p+1)*atoms_p_pore, :] # coordinates for atoms belonging to a single pore
while np.min(pore[:, 2]) < 0 or np.max(pore[:, 2]) > box[t, 2, 2]: # because cross-linked configurations can extend very far up and down
pore[:, 2] = np.where(pore[:, 2] < 0, pore[:, 2] + box[t, 2, 2], pore[:, 2])
pore[:, 2] = np.where(pore[:, 2] > box[t, 2, 2], pore[:, 2] - box[t, 2, 2], pore[:, 2])
_, bins = np.histogram(pore[:, 2], bins=npoints) # bin z-positions
section_indices = np.digitize(pore[:, 2], bins) # list that tells which bin each atom belongs to
bin_centers[t, p, :] = [(bins[i] + bins[i + 1])/2 for i in range(npoints)]
for l in range(1, npoints + 1):
atom_indices = np.where(section_indices == l)[0]
before = pore[atom_indices[0], :] # choose the first atom as a reference
                    shift = transform.translate(pore[atom_indices, :], before, center[t, :])  # shift everything toward the center
for i in range(shift.shape[0]): # check if the points are within the bounds of the unitcell
while not bounds[t].contains_point(shift[i, :2]):
shift[i, :] = put_in_box(shift[i, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t]) # if its not in the unitcell, shift it so it is
c = [np.mean(shift, axis=0)]
centers[t, p, l - 1, :] = transform.translate(c, center[t, :], before) # move everything back to where it was
while not bounds[t].contains_point(centers[t, p, l - 1, :]): # make sure everything is in the box again
centers[t, p, l - 1, :] = put_in_box(centers[t, p, l - 1, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t])
if single_frame:
return centers[0, ...] # doesn't return bin center yet
else:
if save:
file_rw.save_object((centers, bin_centers), savename)
return centers, bin_centers
def center_of_mass(pos, mass_atoms):
""" Calculate center of mass of residues over a trajectory
:param pos: xyz coordinates of atoms
:param mass_atoms : mass of atoms in order they appear in pos
:type pos: np.array (nframes, natoms, 3)
:type mass_atoms: list
:return: center of mass of each residue at each frame
"""
nframes = pos.shape[0]
natoms = len(mass_atoms)
com = np.zeros([nframes, pos.shape[1] // natoms, 3]) # track the center of mass of each residue
for f in range(nframes):
for i in range(com.shape[1]):
w = (pos[f, i * natoms:(i + 1) * natoms, :].T * mass_atoms).T # weight each atom in the residue by its mass
com[f, i, :] = np.sum(w, axis=0) / sum(mass_atoms) # sum the coordinates and divide by the mass of the residue
return com
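# A minimal sketch (hypothetical masses and random positions) showing the shapes
# expected by center_of_mass: 2 frames of a single 3-atom residue give (2, 1, 3).
def _center_of_mass_example():
    pos = np.random.rand(2, 3, 3)        # (nframes, natoms, 3)
    mass_atoms = [12.011, 1.008, 1.008]  # e.g. one carbon and two hydrogens
    return center_of_mass(pos, mass_atoms)  # shape (2, 1, 3)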
def residue_center_of_mass(t, res):
""" Calculate the center of mass versus time of a residue in an MD trajectory
:param t: mdtraj trajectory object
:param res: name of residue to track
:type t: object
:type res: str
:return: center of mass of residue versus time
"""
residue = topology.Residue(res) # get resiude attributes
ndx = [a.index for a in t.topology.atoms if a.residue.name == res] # index of all residue atoms
names = [a.name for a in t.topology.atoms if a.residue.name == res][:residue.natoms] # names of atoms in one residue
mass = [residue.mass[x] for x in names] # mass of atoms in order that they appear in file
print('Calculating center of mass trajectories of residue %s' % residue.name)
return center_of_mass(t.xyz[:, ndx, :], mass) # determine center of mass trajectories
def compdensity(coord, pore_centers, box, cut=1.5, nbins=50, spline=False):
""" Measure the density of a component as a function of the distance from the pore centers.
:param coord: the coordinates of the component(s) which you want a radial distribution of at each frame
:param pore_centers: a numpy array of the locations of each pore center at each trajectory frame
:param cut: cutoff distance for distance calculations. Will not count anything further than cut from the pore center
:param nbins: number of bins in r direction
:param spline: calculate RDF with respect to spline
:type coord: numpy.ndarray
:type pore_centers: numpy.ndarray
:type cut: float
:type nbins: int
:type spline: bool
:return: Radial distance from pore center r, and the density of a species, whose positions are defined by
`coordinates`, as a function the distance from the pore center.
"""
nT = coord.shape[0]
pores = pore_centers.shape[1]
density = np.zeros([nT, nbins]) # number / nm^3
for t in tqdm.tqdm(range(nT), unit=' Frames'):
for p in range(pores):
if spline:
distances = radial_distance_spline(pore_centers[t, p, ...], coord[t, ...], box[t, ...])
else:
distances = np.linalg.norm(coord[t, :, :2] - pore_centers[t, p, :], axis=1)
hist, bin_edges = np.histogram(distances, bins=nbins, range=(0, cut))
density[t, :] += hist
density[t, :] /= (pores * box[t, 2, 2]) # normalize by z-dimension
# normalize based on volume of anulus where bin is located (just need to divide by area since height done above)
r = np.zeros([nbins])
for i in range(nbins):
density[:, i] /= (np.pi * (bin_edges[i + 1] ** 2 - bin_edges[i] ** 2))
r[i] = (bin_edges[i + 1] + bin_edges[i]) / 2 # center of bins
return r, density
def distance_from_pore_center(coord, pore_centers, box, spline=False):
""" Measure the density of a component as a function of the distance from the pore centers.
:param coord: the coordinates of the component(s) which you want a radial distribution of at each frame
:param pore_centers: a numpy array of the locations of each pore center at each trajectory frame
:param cut: cutoff distance for distance calculations. Will not count anything further than cut from the pore center
:param
:type coord: numpy.ndarray
:type pore_centers: numpy.ndarray
:type cut: float
:return: Radial distance of each individual solute/component, defined by coords, as a function of time
"""
nT = coord.shape[0]
pores = pore_centers.shape[1]
nsolute = coord.shape[1]
r_distances = np.zeros([nT, nsolute])
for t in tqdm.tqdm(range(nT), unit=' Frames'):
rd = np.zeros([nsolute, pores])
for p in range(pores):
if spline:
rd[:, p] = radial_distance_spline(pore_centers[t, p, ...], coord[t, ...], box[t, ...])
else:
rd[:, p] = np.linalg.norm(coord[t, :, :2] - pore_centers[t, p, :], axis=1)
# Move the minimum solute--pore-center distance for each solute to the first index of rd
# This removes any assumption that there is a constant number of solutes per pore and that the solute
# stays in the same pore.
for i, r in enumerate(rd): # there is probably a vectorized way to do this with argsort
rd[i, :] = r[np.argsort(r)]
r_distances[t, :] = rd[:, 0]
return r_distances
def radial_distance_spline(spline, com, box):
""" Calculate radial distance from pore center based on distance from center of mass to closest z point in spline
    :param spline: coordinates of spline for a single pore and frame
    :param com: atomic center-of-mass coordinates
    :param box: box vectors, mdtraj format (3, 3)
    :type spline: np.ndarray [npts_spline, 3]
    :type com: np.ndarray [n_com, 3]
    :type box: np.ndarray [3, 3]
:return: array of distances from pore center
"""
edges = np.zeros([spline.shape[0] + 1])
edges[1:-1] = ((spline[1:, 2] - spline[:-1, 2]) / 2) + spline[:-1, 2]
edges[-1] = box[2, 2]
com = wrap_box(com, box)
zbins = np.digitize(com[:, 2], edges)
# handle niche case where coordinate lies exactly on the upper or lower bound
zbins = np.where(zbins == 0, zbins + 1, zbins)
zbins = np.where(zbins == edges.size, zbins - 1, zbins)
return np.linalg.norm(com[:, :2] - spline[zbins - 1, :2], axis=1)
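# A minimal sketch (hypothetical data) for radial_distance_spline: a straight
# vertical spline through the origin of a 4 nm cubic box, so the result reduces to
# the xy distance from the z-axis. Relies on the same wrap_box helper used above.
def _radial_distance_spline_example():
    spline = np.column_stack((np.zeros(20), np.zeros(20), np.linspace(0.1, 3.9, 20)))
    com = np.random.uniform(0, 4, size=(100, 3))
    box = np.diag([4.0, 4.0, 4.0])
    return radial_distance_spline(spline, com, box)  # shape (100,)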
def minimum_image_distance(dist, box):
""" Calculate minimum image distances from a vector of distances. This assumes a monoclinic unit cell where the x
box vector is fixed along the x-axis, the z-box vector is perpendicular to the xy plane, and the y-box vector makes
an angle, theta, with the x-axis.
    :param dist: a vector of distances (n, 3) where n is number of points
    :param box: box vectors meant to enclose dist, mdtraj format: (3, 3)
    :return: minimum image distances, same shape as dist
"""
x_box = box[0, 0] # length of x-box vector
y_box = box[1, 1] # perpendicular distance from x-axis to top of box in y-direction
z_box = box[2, 2] # length of z-box vector
d = np.copy(dist)
angle = np.arcsin(y_box / x_box) # angle between y-box vector and x-box vector in radians
# check x coordinates
while np.max(np.abs(d[:, 0])) > 0.5*x_box: # iterate in case subtracting/adding box vector length once isn't enough
d[:, 0] = np.where(d[:, 0] > 0.5*x_box, d[:, 0] - x_box, d[:, 0])
d[:, 0] = np.where(d[:, 0] < -0.5*x_box, d[:, 0] + x_box, d[:, 0])
# check y coordinates
    while np.amax(np.abs(d[:, 1])) > 0.5*y_box:
        # NOTE: this loop body is reconstructed by analogy with the x-coordinate
        # check above; shifting by the y-box vector (box[1, 0], box[1, 1]) moves
        # both the x and y components in this monoclinic cell.
        d[:, :2] = np.where((d[:, 1] > 0.5*y_box)[:, None], d[:, :2] - box[1, :2], d[:, :2])
        d[:, :2] = np.where((d[:, 1] < -0.5*y_box)[:, None], d[:, :2] + box[1, :2], d[:, :2])
    # check z coordinates
    while np.amax(np.abs(d[:, 2])) > 0.5*z_box:
        d[:, 2] = np.where(d[:, 2] > 0.5*z_box, d[:, 2] - z_box, d[:, 2])
        d[:, 2] = np.where(d[:, 2] < -0.5*z_box, d[:, 2] + z_box, d[:, 2])
    return d
from os.path import expanduser, join
from os import makedirs
import pkg_resources
import pandas as pd
import numpy as np
# import cobra as cb
import cplex as cp  # used by optimize_noncobra below
import matplotlib.pyplot as plt
import seaborn as sb
from cycler import cycler
import time
def dfba_oldschool(x0,y0,T,dt,model_li,alphas_li,V,infl,ddict):
    '''Implements dynamic FBA for a community using the algorithm from Varma and Palsson.
    Give x0 and model_li as lists, ddict as a dictionary of dilutions, and infl as an inflow dict.
    '''
t1 = time.time()
mess = "Complete"
num_sp = len(x0)
if len(model_li) != num_sp:
print('missing a model or two...')
return None
t = np.array([0])
x = np.array([x0])
x_dict = dict(zip([mod.name for mod in model_li],x0))
xkys = list(x_dict.keys())
ykys = list(y0.keys())
y_out = np.array([[y0[yk] for yk in ykys]])
y = y0.copy()
media_keys = dict()
rev_med_keys = dict()
for mod in model_li:
media_keys[mod.name] = dict([(ky,mod.reactions.get_by_id(ky).name) for ky in list(mod.medium.keys())])
rev_med_keys[mod.name] = dict([(mod.reactions.get_by_id(ky).name,ky) for ky in list(mod.medium.keys())])
initime = time.time() - t1
reoptime = 0
optcount = 0
#Reconcile the medium files with the exchanged metabolites - for each model we need a dict of
#metabolites:reaction. Maybe pass that in with the initial conditions?
while t[-1]<T:
#update the media and optimize
optimal_growths = dict()
optimal_fluxes = dict()
if any(np.array(list(y.values())) < 0):
            print('dfba_oldschool: overdepleted resource')
mess = "Failed"
break
t3 = time.time()
for mod in model_li:
#set the media based on available metabolites
tmp_med = mod.medium
kydict = media_keys[mod.name]
al_dict = alphas_li[mod.name]#need alphas_li to be a dict of dicts with keys model name and rxn_id
for ky in tmp_med:
tmp_med[ky] = al_dict[ky]*y[kydict[ky]]/V
mod.medium = tmp_med
#optimize
modsol = mod.optimize()
optimal_growths[mod.name] = modsol.objective_value
optimal_fluxes[mod.name] = modsol.fluxes
reoptime += time.time() - t3
optcount += 1
tmp_xd = {}
for sp in x_dict:
tmp_xd[sp] = x_dict[sp]*np.exp(dt*(optimal_growths[sp] - ddict[sp]))
y_new = y
for yi in y_new:
yn = y_new[yi]
for j in model_li:
jnm = j.name
if yi in rev_med_keys[jnm].keys():
if optimal_growths[jnm]-ddict[jnm] != 0:
                        yn += (-optimal_fluxes[jnm].loc[rev_med_keys[jnm][yi]]/(optimal_growths[jnm]-ddict[jnm]))*x_dict[jnm]*(1-np.exp(dt*(optimal_growths[jnm]-ddict[jnm])))
else:
yn += -dt*optimal_fluxes[jnm].loc[rev_med_keys[jnm][yi]]*x_dict[jnm]
y_new[yi] = yn + infl[yi]*dt
x_dict = tmp_xd
t = np.append(t,[t[-1]+dt])
x = np.append(x,[[x_dict[xk] for xk in xkys]],axis=0)
y_out = np.append(y_out,[[y_new[yk] for yk in ykys]],axis=0)
y = y_new
t2 = time.time() - t1
minuts,sec = divmod(t2,60)
print("End t = ",t[-1])
print("-----")
print("-----")
print("dfba_oldschool: ", mess, " in ",int(minuts)," minutes, ",sec," seconds.")
print("dfba_oldschool: Initialization was ", 100*initime/t2, "% of computational time.")
print("dfba_oldschool: Reinitialization was ", 100*reoptime/t2, "% of computational time.")
print("dfba_oldschool: Required ",optcount," reinitializations.")
x_asdict = dict([(xkys[i],x[:,i]) for i in range(len(xkys))])
y_asdict = dict([(ykys[i],y_out[:,i]) for i in range(len(ykys))])
return x_asdict,y_asdict,t
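# A heavily hedged usage sketch for dfba_oldschool (all names hypothetical): two
# cobra models sharing one glucose pool. y0 and infl must carry an entry for the
# *name* of every exchange reaction appearing in each model's medium.
def _dfba_oldschool_example(model_li):
    x0 = [0.01, 0.01]                                 # initial biomasses, one per model
    y0 = {'D-Glucose exchange': 10.0}                 # metabolite name -> amount
    alphas = {m.name: {k: 1.0 for k in m.medium} for m in model_li}
    infl = {'D-Glucose exchange': 0.0}                # inflow per metabolite
    ddict = {m.name: 0.0 for m in model_li}           # dilution rate per species
    return dfba_oldschool(x0, y0, T=5.0, dt=0.1, model_li=model_li,
                          alphas_li=alphas, V=1.0, infl=infl, ddict=ddict)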
def optimize_noncobra(toy,available):
G1,G2,obj,inner_bounds,als,lwex = toy
intk = als*available
for i in range(len(lwex)):
if lwex[i] > intk[i]:
lwex[i] = intk[i]
# print(lwex,intk)
MatrixA = np.concatenate([G1,G1,G2],axis = 0)
growth = cp.Cplex()
growth.set_results_stream(None)
growth.set_warning_stream(None)
sparms = ['w'+str(j) for j in range(MatrixA.shape[1])]
s_lbs = np.array(inner_bounds[0]).astype(float)
s_ubs = np.array(inner_bounds[1]).astype(float)
growth.variables.add(obj = obj, lb = s_lbs, ub = s_ubs, names = sparms)
growth.objective.set_sense(growth.objective.sense.maximize)
g1p2 = [list(g.astype(float)) for g in MatrixA]
g1p2_wi = [[sparms, g] for g in g1p2]
bds_vec = np.concatenate([lwex,intk,np.zeros(len(G2))])
bdtypes = 'G'*len(G1)+'L'*len(G1)+'E'*len(G2)
growth.linear_constraints.add(lin_expr = g1p2_wi, senses = bdtypes, rhs = bds_vec)
growth.solve()
grate = growth.solution.get_objective_value()
wi = growth.solution.get_values()
ydots = np.dot(G1,wi)
return [grate,ydots]
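# A hedged sketch of the `toy` list layout expected by optimize_noncobra; the
# numbers are placeholders chosen only to make the LP feasible, not a meaningful
# metabolic model. Requires a working cplex installation.
def _optimize_noncobra_example():
    G1 = np.array([[-1.0, 0.0]])                 # exchange stoichiometry (n_resources x n_rxns)
    G2 = np.array([[1.0, -1.0]])                 # internal mass-balance rows
    obj = [0.0, 1.0]                             # maximize the second flux
    inner_bounds = ([-10.0, 0.0], [10.0, 10.0])  # (lower, upper) per-flux bounds
    als = np.array([1.0])                        # uptake coefficients, one per resource
    lwex = np.array([-10.0])                     # lower bounds on exchange fluxes
    toy = [G1, G2, obj, inner_bounds, als, lwex]
    grate, ydots = optimize_noncobra(toy, np.array([5.0]))  # available resource = 5
    return grate, ydots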
def dfba_oldschool_notcobra(x0,y0,T,dt,model_li,infl,dlist,V = 1):
    '''Implements dynamic FBA for a community using the algorithm from Varma and Palsson.
    Give x0 and model_li as lists.
    model_li should be a list of lists [G1, G2, obj, inner_bounds, als, exchng_lower_bounds].
    '''
num_sp = len(x0)
if len(model_li) != num_sp:
print('missing a model or two...')
return None
t = np.array([0])
x = np.array([x0])
y = np.array([y0])
while t[-1]<T:
optimal_growths = np.empty(len(x[-1]))
optimal_fluxes = np.empty((len(x[-1]),len(y[-1])))
if any(y[-1] < 0):
            print('overdepleted resource')
break
j = 0
for mod in model_li:
# print(mod[5])
gr,fl = optimize_noncobra(mod,y[-1])
optimal_growths[j] = gr
optimal_fluxes[j] = fl
j+=1
x_new = x[-1]*np.exp(dt*(optimal_growths-np.array(dlist)))
mult_vect = dt*x[-1]
for i in range(len(x[-1])):
if optimal_growths[i]-dlist[i] != 0:
mult_vect[i] = x[-1][i]/(optimal_growths[i]-dlist[i])*(1-np.exp(dt*(optimal_growths[i]-dlist[i])))
y_new = y[-1] + np.dot(optimal_fluxes.T,mult_vect) + infl*dt
t = np.append(t,[t[-1]+dt])
        x = np.append(x, [x_new], axis=0)
        # NOTE: the following lines are reconstructed by analogy with
        # dfba_oldschool above; the original source was truncated here.
        y = np.append(y, [y_new], axis=0)
    return x, y, t
# -*- coding:Utf-8 -*-
import pylayers.util.geomutil as geu
import pylayers.util.plotutil as plu
import shapely.geometry as shg
import numpy as np
from numpy.testing import (TestCase, assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite)
class Tesgeu(TestCase):
def test_onb(self):
print("testing geomutil.onb")
A = np.array([[0,0,0,0],[1,2,3,4],[0,0,0,0]])
B = np.array([[0,0,0,0],[1,2,3,4],[10,10,10,10]])
v = np.array([[1,1,1,1],[0,0,0,0],[0,0,0,0]])
T = geu.onb(A, B,v)
print( np.shape(T))
print( T[:,0,:])
print( T[:,1,:])
print( T[:,2,:])
assert_equal(np.shape(T),(4,3,3))
def test_ispoint(self):
print("testing geomutil.ispoint")
tpts= (np.array([[1,2,3],[5,6,7]]),np.array([-1,-2,-3]))
pt = np.array([[1],[5]])
k = geu.ispoint(tpts,pt)
assert_equal(k,-1)
pt = np.array([[2],[6]])
k = geu.ispoint(tpts,pt)
assert_equal(k,-2)
pt = np.array([[3],[7]])
k = geu.ispoint(tpts,pt)
assert_equal(k,-3)
pt = np.array([[13],[7]])
k = geu.ispoint(tpts,pt)
assert_equal(k,0)
def test_ptconvex2(self):
print("testing geomutil.ptconvex2")
points = shg.MultiPoint([(0, 0), (0, 1), (3.2, 1), (3.2, 0.7), (0.4, 0.7), (0.4, 0)])
polyg = geu.Polygon(points)
cvex, ccave = polyg.ptconvex2()
assert_equal(cvex,[-5] )
assert_equal(ccave,[-1, -2, -3, -4, -6] )
points = shg.MultiPoint([(0, 0), (0, 1), (-3.2, 1), (-3.2, 0.7), (-0.4, 0.7), (-0.4, 0)])
polyg = geu.Polygon(points)
cvex, ccave = polyg.ptconvex2()
assert_equal(cvex,[-5] )
assert_equal(ccave,[-1, -2, -3, -4, -6] )
def test_is_aligned(self):
print("testing is_aligned")
p1 = np.array([0,0])
p2 = np.array([1,0])
p3 = np.array([3,0])
p4 = np.array([4,0])
p5 = np.array([3,0.1])
p6 = np.array([4,0.1])
p7 = np.array([4,0.001])
b1 = geu.is_aligned4(p1,p2,p3,p4,tol=1e-7)
b2 = geu.is_aligned4(p1,p2,p5,p6,tol=1e-7)
b3 = geu.is_aligned4(p1,p2,p3,p7,tol=1e-1)
b4 = geu.is_aligned4(p1,p2,p3,p7,tol=1e-4)
assert b1
assert not b2
assert b3
assert not b4
def test_MATP(self):
print("test_MATP")
vl = np.array([0,0,1]) # beam in z direction
pl = np.array([1,0,0]) # polar along x
phi = np.pi/2 # beam in y direction
tilt = 0 # no tilt
M = geu.MATP(vl, pl, phi, tilt,'V')
vg = np.dot(M, vl)
pg = np.dot(M, pl)
np.testing.assert_almost_equal(vg,[0,1,0]) # pointing in y
np.testing.assert_almost_equal(pg,[0,0,1]) # polar along z
M = geu.MATP(vl, pl, phi, tilt,'H')
vg = np.dot(M, vl)
pg = np.dot(M, pl)
np.testing.assert_almost_equal(vg,[0,1,0]) # pointing in y
np.testing.assert_almost_equal(pg,[-1,0,0]) # polar along x
def test_Bthph(self):
th = np.array([np.pi/2.])
ph = np.array([np.pi/4.])
M = np.array([[0,-1,0],[1,0,0],[0,0,1]]) # rotation -np.pi/2 along z
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg, np.pi/2.)
np.testing.assert_almost_equal(phg,-np.pi/4.)
th = np.array([np.pi/2.])
ph = np.array([np.pi/4.])
M= np.array([[1,0,0],[0,0,-1],[0,1,0]])# rotation -np.pi/2 along x
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg,3*np.pi/4.)
np.testing.assert_almost_equal(phg,0.)
th = np.array([np.pi/2.])
ph = np.array([np.pi/4.])
M=np.array([[1,0,0],[0,0,1],[0,-1,0]])# rotation np.pi/2 along x
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg, np.pi/4.)
np.testing.assert_almost_equal(phg,0.)
th = np.array([np.pi/4])
ph = np.array([0.])
M=np.array([[1,0,0],[0,0,1],[0,-1,0]]) # rotation np.pi/2 along x
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg, np.pi/2.)
np.testing.assert_almost_equal(phg,-np.pi/4.)
M=np.array([[1,0,0],[0,0,-1],[0,1,0]]) # rotation -np.pi/2 along x
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg, np.pi/2.)
np.testing.assert_almost_equal(phg, np.pi/4.)
M=np.eye(3)
thg, phg = geu.Bthph(th, ph, M)
np.testing.assert_almost_equal(thg, th)
        np.testing.assert_almost_equal(phg, ph)
import pandas as pd
import seaborn as sns
import json
import matplotlib.pyplot as plt
import sys
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import model_selection
from sklearn.exceptions import UndefinedMetricWarning
import warnings
import numpy as np
import scipy as sp
class CPUGPUComparison():
def __init__( self ):
print('CPU GPU SpTRSV performance comparison\n')
def DrawComparisonTable(self, filename):
perf_dataset = pd.read_csv(filename)
winner_df = perf_dataset.idxmin(axis=1)
winner_counts = winner_df.value_counts()
norm_winner_counts = winner_df.value_counts(normalize=True)*100
print(" ----------------------------------------------------------------------------------------------------")
print(" |%15s%35s%32s%15s |" % ("Architecture |","SpTRSV implementation |","Winner for # of matrices |",\
"Percentage"))
print(" ----------------------------------------------------------------------------------------------------")
print(" |%15s%35s%30d%s%13.2f %% |" % ("CPU |","MKL(seq) |", winner_counts['mkl_seq']," |",norm_winner_counts['mkl_seq']))
print(" |%15s%35s%30d%s%13.2f %% |" % ("|","MKL(par) |", winner_counts['mkl_par']," |",norm_winner_counts['mkl_par']))
print(" ----------------------------------------------------------------------------------------------------")
print(" |%15s%35s%30d%s%13.2f %% |" % ("GPU |","cuSPARSE(v1) |", winner_counts['cusparse_v1']," |",norm_winner_counts['cusparse_v1']))
print(" |%15s%35s%30d%s%13.2f %% |" % ("|","cuSPARSE(v2)(level-sch.) |", winner_counts['cusparse_v2_lvl']," |",norm_winner_counts['cusparse_v2_lvl']))
print(" |%15s%35s%30d%s%13.2f %% |" % ("|","cuSPARSE(v2)(no level sch.) |", winner_counts['cusparse_v2_nolvl']," |",norm_winner_counts['cusparse_v2_nolvl']))
print(" |%15s%35s%30d%s%13.2f %% |" % ("|","Sync-Free |", winner_counts['syncfree']," |",norm_winner_counts['syncfree']))
print(" ----------------------------------------------------------------------------------------------------")
def DrawStatsTable(self, filename):
stats_dataset = pd.read_csv(filename)
ds_median = stats_dataset.median()
ds_min = stats_dataset.min()
ds_max = stats_dataset.max()
min_rows = ds_min['rows']/1000
median_rows = ds_median['rows']/1000
max_rows = ds_max['rows']/1000000
min_nnzs = ds_min['nnzs']/1000
median_nnzs = ds_median['nnzs']/1000
max_nnzs = ds_max['nnzs']/1000000
print(' ---------------------------------------------------------------------')
print(" |%20s%16s%16s%16s"%(" |","Minimum |", "Median |","Maximum |"))
print(' ---------------------------------------------------------------------')
print(" |%20s%13.2fK%s%13.2fK%s%13.2fM%s"%("Number of rows |",min_rows," |", median_rows," |",max_rows, " |"))
print(' ---------------------------------------------------------------------')
print(" |%20s%13.3fK%s%13.3fK%s%13.3fM%s"%("Number of nonzeros |",min_nnzs, " |",median_nnzs, " |", max_nnzs," |"))
print(' ---------------------------------------------------------------------')
def DrawFigure(self, filename):
perf_data = pd.read_csv(filename)
perf_data.to_json("temp.json", orient='records')
with open("temp.json", "r") as filename:
V100_Gold_dataset_json = json.load(filename)
V100_Gold_json_formatted = []
for i in range(0, 37):
V100_Gold_json_formatted.append({
"Platform 1": V100_Gold_dataset_json[i]["Platform"],
"Matrix 1": V100_Gold_dataset_json[i]["Matrix ID"],
"Execution Time 1": V100_Gold_dataset_json[i]["Execution Time"],
"Degree of Parallelism 1":V100_Gold_dataset_json[i]["Degree of Parallelism"],
"Winner 1":V100_Gold_dataset_json[i]["Winner"],
"Platform 2": V100_Gold_dataset_json[i+37]["Platform"],
"Matrix 2": V100_Gold_dataset_json[i+37]["Matrix ID"],
"Execution Time 2": V100_Gold_dataset_json[i+37]["Execution Time"],
"Degree of Parallelism 2":V100_Gold_dataset_json[i]["Degree of Parallelism"],
"Winner 2": V100_Gold_dataset_json[i+37]["Winner"]})
V100_Gold_json_formatted = sorted(V100_Gold_json_formatted, key = lambda i: (i['Winner 1'], i['Degree of Parallelism 1']))
V100_Gold_json_sorted = []
V100_Gold_Matrix = []
for i in range(0, 37):
V100_Gold_json_sorted.append({
"Platform": V100_Gold_json_formatted[i]["Platform 1"],
"Matrix ID": V100_Gold_json_formatted[i]["Matrix 1"],
"Degree of Parallelism": V100_Gold_json_formatted[i]["Degree of Parallelism 1"],
"Execution Time": V100_Gold_json_formatted[i]["Execution Time 1"],
})
V100_Gold_Matrix.append(V100_Gold_json_formatted[i]["Matrix 1"])
for i in range(0, 37):
V100_Gold_json_sorted.append({
"Platform": V100_Gold_json_formatted[i]["Platform 2"],
"Matrix ID": V100_Gold_json_formatted[i]["Matrix 2"],
"Degree of Parallelism": V100_Gold_json_formatted[i]["Degree of Parallelism 2"],
"Execution Time": V100_Gold_json_formatted[i]["Execution Time 2"],
})
with open("temp2.json", "w") as file2:
json.dump(V100_Gold_json_sorted, file2)
V100_Gold = pd.read_json('temp2.json', orient='records')
plt.figure(figsize=(15,5))
p1 = sns.barplot(x="Matrix ID",y="Execution Time",hue="Platform", data=V100_Gold,palette = "magma", edgecolor = 'w', order=V100_Gold_Matrix)
sns.set(font_scale = 1.3)
sns.set_style("white")
p1.set_yscale("log")
p1.set_xticklabels(p1.get_xticklabels(), rotation=90)
ax1 = p1.axes
ax1.set(xticklabels=V100_Gold["Degree of Parallelism"])
ax1.axvline(12.5, ls='--', lw=1.8)
ax1.text(1.0, 200, "GPU winners: 24")
ax1.text(1.0, 120, "CPU winners: 13")
p1.set_xlabel("Matrix degree of parallelism (DoP)")
p1.set_ylabel("Lower triangular solve time (msec)")
legend = p1.legend()
legend.texts[0].set_text("NVIDIA V100")
legend.texts[1].set_text("Intel Gold")
plt.legend(loc='upper right')
plt.setp(ax1.xaxis.get_majorticklabels(), ha='center')
fig1 = p1.get_figure()
fig1.set_rasterized(True)
fig1.savefig('./datasets/figure2.eps', bbox_inches='tight',rasterized=True)
print("Figure 2 saved in datasets directory as figure2.eps")
plt.show()
class FeatureSelection():
def __init__( self ):
print('Feature Selection\n')
def PrintAllFeatures(self, filename):
features = pd.read_csv(filename)
for col in features.columns:
print(col)
def FeatureRanking(self, filename):
features_data = pd.read_csv(filename)
features = features_data.drop(['winner'], axis = 1)
target = features_data['winner']
features=features[:-2]
target=target[:-2]
KBestFeatures = SelectKBest(score_func=chi2, k=30)
fit = KBestFeatures.fit(features, target)
feature_dict = {'Feature':features.columns, 'Score':fit.scores_}
feature_df = pd.DataFrame(data=feature_dict)
desc = ['Number of rows', 'Number of non-zeros','Number of levels', \
'Maximum row length count', 'Maximum column length count', "Minimum column length count", \
'Minimum row length count', 'Maximum non-zeros per level row-wise', \
'Maximum non-zeros per level column-wise', 'Maximum row length', \
'Maximum column length', 'Mean row-length',\
'Maximum rows per level','Median rows per level', \
'Median row length', 'Median column length', \
'Mean non-zeros per level row-wise', 'Standard deviation rows per level', \
'Standard deviation non-zeros per level row-wise', 'Standard deviation rows length', \
'Standard deviation column length','Mean rows per level', 'Mean max column length per level', \
'Mean mean column length per level', 'Mean std. deviation column length per level', \
'Mean maximum row length per level','Mean standard deviation row length per level',\
'Mean mean row length per level','Mean minimum row length per level',\
'Mean median row length per level']
feature_df['Description'] = desc
feature_df_sorted = feature_df.nlargest(30, 'Score')
feature_df_sorted.reset_index(drop=True,inplace=True)
feature_df_sorted.index += 1
print(feature_df_sorted.to_string(index=True))
class Prediction():
def __init__( self ):
print('Prediction\n')
def CrossValidation(self, filename, mode):
training_data = pd.read_csv(filename)
if mode == 1: # Traning set for 10 features
X = training_data.drop(['min_rl_cnt','mean_rpl','median_rpl','max_cl','lvls','std_rpl', \
'mean_max_cl_pl','mean_mean_cl_pl','max_rl','mean_std_cl_pl','mean_max_rl_pl',\
'std_cl','mean_std_rl_pl','mean_mean_rl_pl','mean_median_rl_pl','mean_min_rl_pl',\
'mean_rl','median_rl','median_cl','std_rl','mkl_seq','mkl_par','cusparse_v1',\
'cusparse_v2_lvl','cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner',\
'2nd','3rd','4th','5th','6th'], axis=1)
else: # Traning set for 30 features
X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl', \
'cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd',\
'3rd','4th','5th','6th'], axis=1)
y = training_data['winner']
sc = StandardScaler()
X_scaled = sc.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44)
rfc_algo_selection = RandomForestClassifier(n_estimators=300)
rfc_algo_selection.fit(X_train, y_train)
pred_rfc_algo_selection = rfc_algo_selection.predict(X_test)
seed = 10
cv_results = []
accuracy = 'accuracy'
precision = 'precision_weighted'
recall = 'recall_weighted'
f1_score = 'f1_weighted'
test_precision = 'test_precision_weighted'
test_recall = 'test_recall_weighted'
test_f1 = 'test_f1_weighted'
test_accuracy = 'test_accuracy'
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
scoring = [accuracy, precision, recall,f1_score]
kfold = model_selection.KFold(n_splits=10, random_state=seed)
with warnings.catch_warnings():
scores = model_selection.cross_validate(rfc_algo_selection, X_scaled, y, cv=kfold,scoring=scoring)
cv_results.append(scores[test_accuracy])
cv_results.append(scores[test_precision])
cv_results.append(scores[test_recall])
cv_results.append(scores[test_f1])
print('Mean accuracy: %0.1f %%' % (cv_results[0].mean()*100.0))
print('Mean precision: %0.1f %%' % (cv_results[1].mean()*100.0))
print('Mean recall: %0.1f %%' % (cv_results[2].mean()*100.0))
print('Mean f1-score: %0.1f %%' % (cv_results[3].mean()*100.0))
print('Median accuracy: %0.1f %%' % (np.median(cv_results[0])*100.0))
print('Median precision: %0.1f %%' % (np.median(cv_results[1])*100.0))
print('Median recall: %0.1f %%' % (np.median(cv_results[2])*100.0))
print('Median f1-score: %0.1f %%\n' % (np.median(cv_results[3])*100.0))
labels = ['Accuracy', 'Precision', 'Recall', 'F1-score']
ax1 = sns.boxplot(data=cv_results, showmeans=True, fliersize=1, meanprops={"marker":"D","markerfacecolor":"yellow", "markeredgecolor":"none"})
ax1.set_xticklabels(labels)
sns.set(font_scale=1.3)
sns.set_style("white")
vals = ax1.get_yticks()
ax1.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
myfigure = ax1.get_figure()
if mode == 1:
myfigure.savefig('./datasets/figure8.eps',bbox_inches='tight')
print("Figure 8 saved in datasets as figure8.eps")
print("Note: Statistics may vary slightly from Figure 8 and from run to run")
else:
myfigure.savefig('./datasets/figure7.eps',bbox_inches='tight')
myfigure.show()
print("Figure 7 saved in datasets as figure7.eps")
print("Note: Statistics can slightly vary from Figure 7 and from run-to-run")
plt.show()
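# Minimal usage sketch (the CSV path is a placeholder; the file must provide the
# feature/timing columns referenced above):
# if __name__ == '__main__':
#     Prediction().CrossValidation('./datasets/training_data.csv', mode=2) # mode != 1 -> 30-feature set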
class Performance():
def __init__( self ):
print('Performance Results\n')
def Speedup(self, filename):
training_data = pd.read_csv(filename)
X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl', \
'cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd',\
'3rd','4th','5th','6th'], axis=1)
y = training_data['winner']
sc = StandardScaler()
X_scaled = sc.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44)
rfc_algo_selection = RandomForestClassifier(n_estimators=300)
rfc_algo_selection.fit(X_train, y_train)
pred_rfc_algo_selection = rfc_algo_selection.predict(X_test)
seed = 10
precision = 'precision_weighted'
recall = 'recall_weighted'
f1_score = 'f1_weighted'
scoring = [precision, recall, f1_score] # kept for reference; cross_val_predict below takes no scoring argument
kfold = model_selection.KFold(n_splits=10)
cross_validate_pred = model_selection.cross_val_predict(rfc_algo_selection, X_scaled, y, cv=kfold)
MKL_seq = training_data['mkl_seq']
MKL_par = training_data['mkl_par']
cus1 = training_data['cusparse_v1']
cus2_lvl = training_data['cusparse_v2_lvl']
cus2_nolvl = training_data['cusparse_v2_nolvl']
syncfree = training_data['syncfree']
algo_labels = {0:'MKL(seq)', 1:'MKL(par)', 2:'cuSPARSE(v1)', \
3:'cuSPARSE(v2)(level-sch.)',4:'cuSPARSE(v2)(no level-sch.)',5:'Sync-Free'}
Gain_vs_MKL_seq = []
Gain_vs_MKL_par = []
Gain_vs_cus1 = []
Gain_vs_cus2_lvl = []
Gain_vs_cus2_nolvl = []
Gain_vs_syncfree = []
i = 0
for val in cross_validate_pred:
if val == 1:
predicted_time = MKL_seq[i]
if val == 2:
predicted_time = MKL_par[i]
if val == 3:
predicted_time = cus1[i]
if val == 4:
predicted_time = cus2_lvl[i]
if val == 5:
predicted_time = cus2_nolvl[i]
if val == 6:
predicted_time = syncfree[i]
Gain_vs_MKL_seq.append(MKL_seq[i]/predicted_time)
Gain_vs_MKL_par.append(MKL_par[i]/predicted_time)
Gain_vs_cus1.append(cus1[i]/predicted_time)
Gain_vs_cus2_lvl.append(cus2_lvl[i]/predicted_time)
Gain_vs_cus2_nolvl.append(cus2_nolvl[i]/predicted_time)
Gain_vs_syncfree.append(syncfree[i]/predicted_time)
i = i + 1
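# Equivalent table-driven lookup (a sketch; relies on the same 1..6 class labels
# produced by the classifier above):
# algo_times = [MKL_seq, MKL_par, cus1, cus2_lvl, cus2_nolvl, syncfree]
# predicted_time = algo_times[val - 1][i]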
predicted_speedup=[]
predicted_speedup.append(Gain_vs_MKL_seq)
predicted_speedup.append(Gain_vs_MKL_par)
predicted_speedup.append(Gain_vs_cus1)
predicted_speedup.append(Gain_vs_cus2_lvl)
predicted_speedup.append(Gain_vs_cus2_nolvl)
predicted_speedup.append(Gain_vs_syncfree)
speedup_g2 = []
speedup_l1 = []
counter = 0
counter_l = 0
counter_l95 = 0
for i in range(6):
for x in predicted_speedup[i]:
if x >= 1:
counter = counter + 1
if x < 1:
counter_l = counter_l + 1
if x < 0.95:
counter_l95 = counter_l95 + 1
speedup_g2.append(counter/998*100) # NB: 998 is hard-coded; it should equal the number of matrices in the dataset
speedup_l1.append(counter_l/998*100)
counter = 0
counter_l = 0
counter_l95 = 0
sns.set(font_scale=1.0)
sns.set_style("white")
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10, 4.5))
fig.set_rasterized(True)
k = 0
for i in range(2):
for j in range(3):
#my_bins = [0,1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,int(np.max(predicted_speedup[k]))]
max_ps = np.max(predicted_speedup[k])
my_bins = np.arange(0, 75)
clrs=['#CB4335' if (x < 1) else '#2874A6' for x in my_bins]
plot = sns.distplot(predicted_speedup[k], \
bins=my_bins, ax=ax[i][j],kde=False)
sns.color_palette("husl", 8)
ax1 = plot.axes
for rec, clr in zip(ax1.patches, clrs):
rec.set_color(clr)
props = dict(boxstyle='round', facecolor='none', alpha=0.5)
ax1.text(0.55, 0.70, ">=1: %.1f%%"%(speedup_g2[k]), transform=ax1.transAxes, fontsize=12,
verticalalignment='top', bbox=props)
ax1.text(0.55, 0.85, "Mean: %.1f"%(sp.stats.hmean(predicted_speedup[k])), transform=ax1.transAxes, fontsize=12,
verticalalignment='top', bbox=props)
z_critical = sp.stats.norm.ppf(q = 0.95) # Get the z-critical value
pop_stdev = np.std(predicted_speedup[k])
hmean = sp.stats.hmean(predicted_speedup[k])
mean_m_x = [abs(hmean-x) for x in predicted_speedup[k]] # absolute deviations from the harmonic mean
sample_size = len(predicted_speedup[k])
h_std = np.sum(mean_m_x)/sample_size
margin_of_error = z_critical * (pop_stdev/np.sqrt(sample_size)) # computed for reference; not annotated on the plot
plot.set_yscale("log")
#if k >= 3:
plot.set_xlabel("Speedup")
plot.set_title(algo_labels[k],loc="left")
if k == 0 or k == 3:
plot.set_ylabel('Number of matrices')
k = k + 1
plt.tight_layout()
warnings.filterwarnings("ignore")
with warnings.catch_warnings():
fig.savefig('./datasets/figure9.pdf',bbox_inches='tight',rasterized=True)
print("Figure 9 saved in datasets as figure9.eps")
print("Note: Statistics can slightly vary from Figure 9 and from run-to-run")
#plt.show()
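# Why the harmonic mean above: speedups are ratios, and the harmonic mean does
# not overstate them. E.g. for speedups [0.5, 2.0] the arithmetic mean is 1.25,
# while the harmonic mean is 2/(1/0.5 + 1/2.0) = 0.8.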
def Overheads(self, filename_training, filename_overhead):
training_data=pd.read_csv(filename_training)
overhead_data=pd.read_csv(filename_overhead)
FE_wo_ilu = overhead_data['FE_oh_wo'] # Feature extraction (FE) overhead without ILU factorization time included
FE_w_ilu = overhead_data['FE_oh_w'] # Feature extraction (FE) overhead with ILU factorization time included
m = overhead_data['m'] # Number of rows
MKL_seq = training_data['mkl_seq']
MKL_par = training_data['mkl_par']
cus1 = training_data['cusparse_v1']
cus2_lvl = training_data['cusparse_v2_lvl']
cus2_nolvl = training_data['cusparse_v2_nolvl']
syncfree = training_data['syncfree']
seed = 250
precision = 'precision_weighted'
recall = 'recall_weighted'
f1_score = 'f1_weighted'
scoring = [precision, recall,f1_score]
X = training_data.drop(['mkl_seq','mkl_par','cusparse_v1','cusparse_v2_lvl','cusparse_v2_nolvl','syncfree','winner','CPU winner','GPU winner','2nd','3rd','4th','5th','6th'], axis=1)
y = training_data['winner']
sc = StandardScaler()
X_scaled = sc.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25, random_state=44)
rfc_algo_selection = RandomForestClassifier(n_estimators=300)
rfc_algo_selection.fit(X_train, y_train)
kfold = model_selection.KFold(n_splits=10)
cross_validate_pred = model_selection.cross_val_predict(rfc_algo_selection, X_scaled, y, cv=kfold)
L_calls_vs_FE_wo_100K = [] # FE overhead in terms of lower triangular solve iterations, without ILU factorization time included, for matrices up to 100K rows
L_calls_vs_FE_w_100K = [] # FE overhead in terms of lower triangular solve iterations, with ILU factorization time included, for matrices up to 100K rows
L_calls_vs_FE_wo_1000K = [] # FE overhead in terms of lower triangular solve iterations, without ILU factorization time included, for matrices from 100K-1000K rows
L_calls_vs_FE_w_1000K = [] # FE overhead in terms of lower triangular solve iterations, with ILU factorization time included, for matrices from 100K-1000K rows
L_calls_vs_FE_wo_g1000K = [] # FE overhead in terms of lower triangular solve iterations, without ILU factorization time included, for matrices > 1000K rows
L_calls_vs_FE_w_g1000K = [] # FE overhead in terms of lower triangular solve iterations, with ILU factorization time included, for matrices > 1000K rows
oh_FE_wo_100K = [] # FE overhead without ILU factorization time included for matrices up to 100K
oh_FE_w_100K = [] # FE overhead with ILU factorization time included for matrices up to 100K
oh_FE_wo_1000K = [] # FE overhead without ILU factorization time included for matrices from 100K-1000K
oh_FE_w_1000K = [] # FE overhead with ILU factorization time included for matrices from 100K-1000K
oh_FE_wo_g1000K = [] # FE overhead without ILU factorization time included for matrices > 1000K
oh_FE_w_g1000K = [] # FE overhead with ILU factorization time included for matrices > 1000K
oh_MKLs_wo_100K = [] # MKL(seq) overhead without ILU factorization time included for matrices up to 100K
oh_MKLs_w_100K = [] # MKL(seq) overhead with ILU factorization time included for matrices up to 100K
oh_MKLp_wo_100K = [] # MKL(par) overhead without ILU factorization time included for matrices up to 100K
oh_MKLp_w_100K = [] # MKL(par) overhead with ILU factorization time included for matrices up to 100K
oh_CUS1_wo_100K = [] # cuSPARSE(v1) overhead without ILU factorization time included for matrices up to 100K
oh_CUS1_w_100K = [] # cuSPARSE(v1) overhead with ILU factorization time included for matrices up to 100K
oh_CUS2lvl_wo_100K = [] # cuSPARSE(v2)(level-sch.) overhead without ILU factorization time included for matrices up to 100K
oh_CUS2lvl_w_100K = [] # cuSPARSE(v2)(level-sch.) overhead with ILU factorization time included for matrices up to 100K
oh_CUS2nolvl_wo_100K = [] # cuSPARSE(v2)(no level-sch.) overhead without ILU factorization time included for matrices up to 100K
oh_CUS2nolvl_w_100K = [] # cuSPARSE(v2)(no level-sch.) overhead with ILU factorization time included for matrices up to 100K
oh_SyncFree_wo_100K = [] # Sync-Free overhead without ILU factorization time included for matrices up to 100K
oh_SyncFree_w_100K = [] # Sync-Free overhead with ILU factorization time included for matrices up to 100K
oh_MKLs_wo_1000K = [] # MKL(seq) overhead without ILU factorization time included for matrices from 100K-1000K
oh_MKLs_w_1000K = [] # MKL(seq) overhead with ILU factorization time included for matrices from 100K-1000K
oh_MKLp_wo_1000K = [] # MKL(par) overhead without ILU factorization time included for matrices from 100K-1000K
oh_MKLp_w_1000K = [] # MKL(par) overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS1_wo_1000K = [] # cuSPARSE(v1) overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS1_w_1000K = [] # cuSPARSE(v1) overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS2lvl_wo_1000K = [] # cuSPARSE(v2)(level-sch.) overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS2lvl_w_1000K = [] # cuSPARSE(v2)(level-sch.) overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS2nolvl_wo_1000K = [] # cuSPARSE(v2)(no level-sch.) overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS2nolvl_w_1000K = [] # cuSPARSE(v2)(no level-sch.) overhead with ILU factorization time included for matrices from 100K-1000K
oh_SyncFree_wo_1000K = [] # Sync-Free overhead without ILU factorization time included for matrices from 100K-1000K
oh_SyncFree_w_1000K = [] # Sync-Free overhead with ILU factorization time included for matrices from 100K-1000K
oh_MKLs_wo_g1000K = [] # MKL(seq) overhead without ILU factorization time included for matrices > 1000K
oh_MKLs_w_g1000K = [] # MKL(seq) overhead with ILU factorization time included for matrices > 1000K
oh_MKLp_wo_g1000K = [] # MKL(par) overhead without ILU factorization time included for matrices > 1000K
oh_MKLp_w_g1000K = [] # MKL(par) overhead with ILU factorization time included for matrices > 1000K
oh_CUS1_wo_g1000K = [] # cuSPARSE(v1) overhead without ILU factorization time included for matrices > 1000K
oh_CUS1_w_g1000K = [] # cuSPARSE(v1) overhead with ILU factorization time included for matrices > 1000K
oh_CUS2lvl_wo_g1000K = [] # cuSPARSE(v2)(level-sch.) overhead without ILU factorization time included for matrices > 1000K
oh_CUS2lvl_w_g1000K = [] # cuSPARSE(v2)(level-sch.) overhead with ILU factorization time included for matrices > 1000K
oh_CUS2nolvl_wo_g1000K = [] # cuSPARSE(v2)(no level-sch.) overhead without ILU factorization time included for matrices > 1000K
oh_CUS2nolvl_w_g1000K = [] # cuSPARSE(v2)(no level-sch.) overhead with ILU factorization time included for matrices > 1000K
oh_SyncFree_wo_g1000K = [] # Sync-Free overhead without ILU factorization time included for matrices > 1000K
oh_SyncFree_w_g1000K = [] # Sync-Free overhead with ILU factorization time included for matrices > 1000K
oh_MKLs_wo_100K_ana = [] # MKL(seq) algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_MKLs_w_100K_ana = [] # MKL(seq) algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_MKLp_wo_100K_ana = [] # MKL(par) algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_MKLp_w_100K_ana = [] # MKL(par) algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_CUS1_wo_100K_ana = [] # cuSPARSE(v1) algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_CUS1_w_100K_ana = [] # cuSPARSE(v1) algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_CUS2lvl_wo_100K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_CUS2lvl_w_100K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_CUS2nolvl_wo_100K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_CUS2nolvl_w_100K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_SyncFree_wo_100K_ana = [] # Sync-Free algorithm analysis overhead without ILU factorization time included for matrices up to 100K
oh_SyncFree_w_100K_ana = [] # Sync-Free algorithm analysis overhead with ILU factorization time included for matrices up to 100K
oh_MKLs_wo_1000K_ana = [] # MKL(seq) algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_MKLs_w_1000K_ana = [] # MKL(seq) algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_MKLp_wo_1000K_ana = [] # MKL(par) algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_MKLp_w_1000K_ana = [] # MKL(par) algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS1_wo_1000K_ana = [] # cuSPARSE(v1) algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS1_w_1000K_ana = [] # cuSPARSE(v1) algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS2lvl_wo_1000K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS2lvl_w_1000K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_CUS2nolvl_wo_1000K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_CUS2nolvl_w_1000K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_SyncFree_wo_1000K_ana = [] # Sync-Free algorithm analysis overhead without ILU factorization time included for matrices from 100K-1000K
oh_SyncFree_w_1000K_ana = [] # Sync-Free algorithm analysis overhead with ILU factorization time included for matrices from 100K-1000K
oh_MKLs_wo_g1000K_ana = [] # MKL(seq) algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_MKLs_w_g1000K_ana = [] # MKL(seq) algorithm analysis overhead with ILU factorization time included for matrices > 1000K
oh_MKLp_wo_g1000K_ana = [] # MKL(par) algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_MKLp_w_g1000K_ana = [] # MKL(par) algorithm analysis overhead with ILU factorization time included for matrices > 1000K
oh_CUS1_wo_g1000K_ana = [] # cuSPARSE(v1) algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_CUS1_w_g1000K_ana = [] # cuSPARSE(v1) algorithm analysis overhead with ILU factorization time included for matrices > 1000K
oh_CUS2lvl_wo_g1000K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_CUS2lvl_w_g1000K_ana = [] # cuSPARSE(v2)(level-sch.) algorithm analysis overhead with ILU factorization time included for matrices > 1000K
oh_CUS2nolvl_wo_g1000K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_CUS2nolvl_w_g1000K_ana = [] # cuSPARSE(v2)(no level-sch.) algorithm analysis overhead with ILU factorization time included for matrices > 1000K
oh_SyncFree_wo_g1000K_ana = [] # Sync-Free algorithm analysis overhead without ILU factorization time included for matrices > 1000K
oh_SyncFree_w_g1000K_ana = [] # Sync-Free algorithm analysis overhead with ILU factorization time included for matrices > 1000K
emp_oh_wo_100K = 0 # Empirical execution overhead without ILU factorization time included for matrices up to 100K
emp_oh_wo_1000K = 0 # Empirical execution overhead without ILU factorization time included for matrices from 100K-1000K
emp_oh_wo_g1000K = 0 # Empirical execution overhead without ILU factorization time included for matrices > 1000K
emp_oh_w_100K = 0 # Empirical execution overhead with ILU factorization time included for matrices up to 100K
emp_oh_w_1000K = 0 # Empirical execution overhead with ILU factorization time included for matrices from 100K-1000K
emp_oh_w_g1000K = 0 # Empirical execution overhead with ILU factorization time included for matrices > 1000K
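# Maintainability note: the bucket lists above could be generated rather than
# hand-written, e.g. (sketch, not used by the code below):
# algos = ('FE', 'MKLs', 'MKLp', 'CUS1', 'CUS2lvl', 'CUS2nolvl', 'SyncFree')
# oh = {(a, ilu, size): [] for a in algos for ilu in ('wo', 'w')
#       for size in ('100K', '1000K', 'g1000K')}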
i = 0
for val in cross_validate_pred:
if val == 1:
predicted_time = MKL_seq[i]
if val == 2:
predicted_time = MKL_par[i]
if val == 3:
predicted_time = cus1[i]
if val == 4:
predicted_time = cus2_lvl[i]
if val == 5:
predicted_time = cus2_nolvl[i]
if val == 6:
predicted_time = syncfree[i]
if m[i] < 100000:
L_calls_vs_FE_wo_100K.append(FE_wo_ilu[i]*1000/predicted_time)
L_calls_vs_FE_w_100K.append(FE_w_ilu[i]*1000/predicted_time)
oh_MKLs_wo_100K.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) 10 iter'][i]))
oh_MKLs_w_100K.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) 10 iter'][i]+\
overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_100K.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) 10 iter'][i]))
oh_MKLp_w_100K.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) 10 iter'][i]+\
overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_100K.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) 10 iter'][i]))
oh_CUS1_w_100K.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) 10 iter'][i]+\
overhead_data['cuSPARSE(v1) ilu'][i]))
oh_CUS2lvl_wo_100K.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v2)lvl'][i]))
oh_CUS2lvl_w_100K.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v2)lvl'][i]+\
overhead_data['cuSPARSE(v2)iluAna'][i]+overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_100K.append((overhead_data['cuSPARSE(v2)nolvl 10 iter'][i]))
oh_CUS2nolvl_w_100K.append((overhead_data['cuSPARSE(v2)nolvl 10 iter'][i]))
oh_SyncFree_wo_100K.append((overhead_data['Sync-Free ana'][i]+overhead_data['Sync-Free 10 iter'][i]))
oh_SyncFree_w_100K.append((overhead_data['SycnFree_LU'][i]+overhead_data['Sync-Free ana'][i]+\
overhead_data['Sync-Free 10 iter'][i]))
oh_FE_wo_100K.append(overhead_data['FE_oh_wo'][i])
oh_FE_w_100K.append(overhead_data['FE_oh_w'][i])
oh_MKLs_wo_100K_ana.append((overhead_data['MKL(seq) Ana'][i]))
oh_MKLs_w_100K_ana.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_100K_ana.append((overhead_data['MKL(par) Ana'][i]))
oh_MKLp_w_100K_ana.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_100K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]))
oh_CUS1_w_100K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) ilu'][i]))
oh_CUS2lvl_wo_100K_ana.append((overhead_data['cusparse(v2)ana'][i]))
oh_CUS2lvl_w_100K_ana.append((overhead_data['cusparse(v2)ana'][i]+\
overhead_data['cuSPARSE(v2)iluAna'][i]+overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_100K_ana.append(0)
oh_CUS2nolvl_w_100K_ana.append(0)
oh_SyncFree_wo_100K_ana.append((overhead_data['Sync-Free ana'][i]))
oh_SyncFree_w_100K_ana.append((overhead_data['SycnFree_LU'][i]+overhead_data['Sync-Free ana'][i]))
if m[i] >= 100000 and m[i] < 1000000:
L_calls_vs_FE_wo_1000K.append(FE_wo_ilu[i]*1000/predicted_time)
L_calls_vs_FE_w_1000K.append(FE_w_ilu[i]*1000/predicted_time)
oh_MKLs_wo_1000K.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) 10 iter'][i]))
oh_MKLs_w_1000K.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) 10 iter'][i]+\
overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_1000K.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) 10 iter'][i]))
oh_MKLp_w_1000K.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) 10 iter'][i]+\
overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_1000K.append((overhead_data['cuSPARSE(v1) ana'][i]+\
overhead_data['cuSPARSE(v1) 10 iter'][i]))
oh_CUS1_w_1000K.append((overhead_data['cuSPARSE(v1) ana'][i]+\
overhead_data['cuSPARSE(v1) 10 iter'][i]+overhead_data['cuSPARSE(v1) ilu'][i]))
oh_CUS2lvl_wo_1000K.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v2)lvl'][i]))
oh_CUS2lvl_w_1000K.append((overhead_data['cusparse(v2)ana'][i]+\
overhead_data['cuSPARSE(v2)lvl'][i]+\
overhead_data['cuSPARSE(v2)iluAna'][i]+overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_1000K.append((overhead_data['cuSPARSE(v2)nolvl 10 iter'][i]))
oh_CUS2nolvl_w_1000K.append((overhead_data['cuSPARSE(v2)nolvl 10 iter'][i]))
oh_SyncFree_wo_1000K.append((overhead_data['Sync-Free ana'][i]+overhead_data['Sync-Free 10 iter'][i]))
oh_SyncFree_w_1000K.append((overhead_data['SycnFree_LU'][i]+\
overhead_data['Sync-Free ana'][i]+overhead_data['Sync-Free 10 iter'][i]))
oh_FE_wo_1000K.append((overhead_data['FE_oh_wo'][i]))
oh_FE_w_1000K.append((overhead_data['FE_oh_w'][i]))
oh_MKLs_wo_1000K_ana.append((overhead_data['MKL(seq) Ana'][i]))
oh_MKLs_w_1000K_ana.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_1000K_ana.append((overhead_data['MKL(par) Ana'][i]))
oh_MKLp_w_1000K_ana.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_1000K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]))
oh_CUS1_w_1000K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) ilu'][i]))
oh_CUS2lvl_wo_1000K_ana.append((overhead_data['cusparse(v2)ana'][i]))
oh_CUS2lvl_w_1000K_ana.append((overhead_data['cusparse(v2)ana'][i]+\
overhead_data['cuSPARSE(v2)iluAna'][i]+\
overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_1000K_ana.append(0)
oh_CUS2nolvl_w_1000K_ana.append(0)
oh_SyncFree_wo_1000K_ana.append((overhead_data['Sync-Free ana'][i]))
oh_SyncFree_w_1000K_ana.append((overhead_data['SycnFree_LU'][i]+overhead_data['Sync-Free ana'][i]))
#emp_oh_wo_1000K.append(oh_MKLs_wo_1000K[i]+oh_MKLp_wo_1000K[i]+oh_CUS1_wo_1000K[i]+oh_CUS2lvl_wo_1000K[i]+oh_CUS2nolvl_wo_1000K[i]+oh_SyncFree_wo_1000K[i])
if m[i] >= 1000000:
L_calls_vs_FE_wo_g1000K.append(FE_wo_ilu[i]*1000/predicted_time)
L_calls_vs_FE_w_g1000K.append(FE_w_ilu[i]*1000/predicted_time)
oh_MKLs_wo_g1000K.append((overhead_data['MKL(seq) Ana'][i]))
oh_MKLs_w_g1000K.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_g1000K.append((overhead_data['MKL(par) Ana'][i]))
oh_MKLp_w_g1000K.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_g1000K.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) 10 iter'][i]))
oh_CUS1_w_g1000K.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) ilu'][i]+overhead_data['cuSPARSE(v1) 10 iter'][i]))
oh_CUS2lvl_wo_g1000K.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v2)lvl'][i]))
oh_CUS2lvl_w_g1000K.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v1) ilu'][i]+\
overhead_data['cuSPARSE(v2)iluAna'][i]+overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_g1000K.append((0))
oh_CUS2nolvl_w_g1000K.append((0))
oh_SyncFree_wo_g1000K.append((overhead_data['Sync-Free ana'][i]))
oh_SyncFree_w_g1000K.append((overhead_data['SycnFree_LU'][i]+overhead_data['Sync-Free ana'][i]))
oh_FE_wo_g1000K.append(overhead_data['FE_oh_wo'][i])
oh_FE_w_g1000K.append(overhead_data['FE_oh_w'][i])
oh_MKLs_wo_g1000K_ana.append((overhead_data['MKL(seq) Ana'][i]))
oh_MKLs_w_g1000K_ana.append((overhead_data['MKL(seq) Ana'][i]+overhead_data['MKL(seq) ilu'][i]))
oh_MKLp_wo_g1000K_ana.append((overhead_data['MKL(par) Ana'][i]))
oh_MKLp_w_g1000K_ana.append((overhead_data['MKL(par) Ana'][i]+overhead_data['MKL(par) ilu'][i]))
oh_CUS1_wo_g1000K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]))
oh_CUS1_w_g1000K_ana.append((overhead_data['cuSPARSE(v1) ana'][i]+overhead_data['cuSPARSE(v1) ilu'][i]))
oh_CUS2lvl_wo_g1000K_ana.append((overhead_data['cusparse(v2)ana'][i]))
oh_CUS2lvl_w_g1000K_ana.append((overhead_data['cusparse(v2)ana'][i]+overhead_data['cuSPARSE(v2)lvl'][i]+\
overhead_data['cuSPARSE(v1) ilu'][i]+overhead_data['cuSPARSE(v2)iluAna'][i]+\
overhead_data['cuSPARSE(v2)iu'][i]))
oh_CUS2nolvl_wo_g1000K_ana.append(0)
oh_CUS2nolvl_w_g1000K_ana.append(0)
oh_SyncFree_wo_g1000K_ana.append((overhead_data['Sync-Free ana'][i]))
oh_SyncFree_w_g1000K_ana.append((overhead_data['SycnFree_LU'][i]+overhead_data['Sync-Free ana'][i]))
#emp_oh_wo_g1000K.append(oh_MKLs_wo_g1000K[i] + oh_MKLp_wo_g1000K[i] + oh_CUS1_wo_g1000K[i] + oh_CUS2lvl_wo_g1000K[i] + oh_CUS2nolvl_wo_g1000K[i] + oh_SyncFree_wo_g1000K[i])
i = i + 1
emp_oh_wo_100K = (np.sum(oh_MKLs_wo_100K)+np.sum(oh_MKLp_wo_100K)+np.sum(oh_CUS1_wo_100K) + \
np.sum(oh_CUS2lvl_wo_100K) + np.sum(oh_CUS2nolvl_wo_100K) + np.sum(oh_SyncFree_wo_100K))\
/(len(oh_MKLs_wo_100K)*1000)
emp_oh_wo_1000K = (np.sum(oh_MKLs_wo_1000K)+np.sum(oh_MKLp_wo_1000K)+np.sum(oh_CUS1_wo_1000K) + \
np.sum(oh_CUS2lvl_wo_1000K) + np.sum(oh_CUS2nolvl_wo_1000K) + np.sum(oh_SyncFree_wo_1000K))\
/(len(oh_MKLs_wo_1000K)*1000)
emp_oh_wo_g1000K = (np.sum(oh_MKLs_wo_g1000K)+np.sum(oh_MKLp_wo_g1000K)+np.sum(oh_CUS1_wo_g1000K) + \
np.sum(oh_CUS2lvl_wo_g1000K) + np.sum(oh_CUS2nolvl_wo_g1000K) + np.sum(oh_SyncFree_wo_g1000K))\
/(len(oh_MKLs_wo_g1000K)*1000)
emp_oh_w_100K = (np.sum(oh_MKLs_w_100K)+np.sum(oh_MKLp_w_100K)+np.sum(oh_CUS1_w_100K) + \
np.sum(oh_CUS2lvl_w_100K) + np.sum(oh_CUS2nolvl_w_100K) + np.sum(oh_SyncFree_w_100K))/(len(oh_MKLs_w_100K)*1000)
emp_oh_w_1000K = (np.sum(oh_MKLs_w_1000K)+np.sum(oh_MKLp_w_1000K)+np.sum(oh_CUS1_w_1000K) + \
np.sum(oh_CUS2lvl_w_1000K) + np.sum(oh_CUS2nolvl_w_1000K) + np.sum(oh_SyncFree_w_1000K))\
/(len(oh_MKLs_w_1000K)*1000)
emp_oh_w_g1000K = (np.sum(oh_MKLs_w_g1000K)+np.sum(oh_MKLp_w_g1000K)+np.sum(oh_CUS1_w_g1000K) + \
np.sum(oh_CUS2lvl_w_g1000K) + np.sum(oh_CUS2nolvl_w_g1000K) + np.sum(oh_SyncFree_w_g1000K))\
/(len(oh_MKLs_w_g1000K)*1000)
emp_oh_wo_g1000K_ana = (np.sum(oh_MKLs_wo_g1000K_ana)+np.sum(oh_MKLp_wo_g1000K_ana)+np.sum(oh_CUS1_wo_g1000K_ana) + \
np.sum(oh_CUS2lvl_wo_g1000K_ana) + np.sum(oh_CUS2nolvl_wo_g1000K_ana) + np.sum(oh_SyncFree_wo_g1000K_ana))\
/(len(oh_MKLs_wo_g1000K_ana)*1000)
emp_oh_w_g1000K_ana = (np.sum(oh_MKLs_w_g1000K_ana)+np.sum(oh_MKLp_w_g1000K_ana)+np.sum(oh_CUS1_w_g1000K_ana) + \
np.sum(oh_CUS2lvl_w_g1000K_ana) + np.sum(oh_CUS2nolvl_w_g1000K_ana) + np.sum(oh_SyncFree_w_g1000K_ana))\
/(len(oh_MKLs_w_g1000K_ana)*1000)
# Bar entries: (FE, empirical execution, then per-algorithm analysis overheads)
Overhead_wo_100K_bar = (np.sum(oh_FE_wo_100K)/len(oh_FE_wo_100K), emp_oh_wo_100K, \
np.sum(oh_MKLs_wo_100K_ana)/(len(oh_MKLs_wo_100K_ana)*1000),\
np.sum(oh_MKLp_wo_100K_ana)/(len(oh_MKLp_wo_100K_ana)*1000),\
np.sum(oh_CUS1_wo_100K_ana)/(len(oh_CUS1_wo_100K_ana)*1000),\
np.sum(oh_CUS2lvl_wo_100K_ana)/(len(oh_CUS2lvl_wo_100K_ana)*1000),\
np.sum(oh_CUS2nolvl_wo_100K_ana)/(len(oh_CUS2nolvl_wo_100K_ana)*1000),\
np.sum(oh_SyncFree_wo_100K_ana)/(len(oh_SyncFree_wo_100K_ana)*1000))
Overhead_w_100K_bar = (np.sum(oh_FE_w_100K)/len(oh_FE_w_100K), emp_oh_w_100K, \
np.sum(oh_MKLs_w_100K_ana)/(len(oh_MKLs_w_100K_ana)*1000),\
np.sum(oh_MKLp_w_100K_ana)/(len(oh_MKLp_w_100K_ana)*1000),\
np.sum(oh_CUS1_w_100K_ana)/(len(oh_CUS1_w_100K_ana)*1000),\
np.sum(oh_CUS2lvl_w_100K_ana)/(len(oh_CUS2lvl_w_100K_ana)*1000),\
np.sum(oh_CUS2nolvl_w_100K_ana)/(len(oh_CUS2nolvl_w_100K_ana)*1000),\
np.sum(oh_SyncFree_w_100K_ana)/(len(oh_SyncFree_w_100K_ana)*1000))
Overhead_wo_1000K_bar = (np.sum(oh_FE_wo_1000K)/len(oh_FE_wo_1000K), emp_oh_wo_1000K, \
np.sum(oh_MKLs_wo_1000K_ana)/(len(oh_MKLs_wo_1000K_ana)*1000),\
np.sum(oh_MKLp_wo_1000K_ana)/(len(oh_MKLp_wo_1000K_ana)*1000),\
np.sum(oh_CUS1_wo_1000K_ana)/(len(oh_CUS1_wo_1000K_ana)*1000),\
np.sum(oh_CUS2lvl_wo_1000K_ana)/(len(oh_CUS2lvl_wo_1000K_ana)*1000),\
np.sum(oh_CUS2nolvl_wo_1000K_ana)/(len(oh_CUS2nolvl_wo_1000K_ana)*1000),\
np.sum(oh_SyncFree_wo_1000K_ana)/(len(oh_SyncFree_wo_1000K_ana)*1000))
Overhead_w_1000K_bar = (np.sum(oh_FE_w_1000K)/len(oh_FE_w_1000K), emp_oh_w_1000K, \
np.sum(oh_MKLs_w_1000K_ana)/(len(oh_MKLs_w_1000K_ana)*1000),\
np.sum(oh_MKLp_w_1000K_ana)/(len(oh_MKLp_w_1000K_ana)*1000),\
np.sum(oh_CUS1_w_1000K_ana)/(len(oh_CUS1_w_1000K_ana)*1000),\
np.sum(oh_CUS2lvl_w_1000K_ana)/(len(oh_CUS2lvl_w_1000K_ana)*1000),\
np.sum(oh_CUS2nolvl_w_1000K_ana)/(len(oh_CUS2nolvl_w_1000K_ana)*1000),\
np.sum(oh_SyncFree_w_1000K_ana)/(len(oh_SyncFree_w_1000K_ana)*1000))
# -*- coding: utf-8 -*-
"""
Authors
-------
<NAME> <<EMAIL>>
About
-----
Functions to handle command-line input
Known Issues
------------
None
"""
# General imports
import os
import sys
import time
from functools import partial
import shutil
if os.path.exists(os.path.join(os.getcwd(), 'config')): # You're 1 up from config?
sys.path.insert(0, os.path.join(os.getcwd(), 'config'))
else: # You're working from a directory parallel with config?
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '../config')))
import pickle
# Tractor imports
from tractor import NCircularGaussianPSF, PixelizedPSF, Image, Tractor, FluxesPhotoCal, NullWCS, ConstantSky, EllipseESoft, Fluxes, PixPos
from tractor.galaxy import ExpGalaxy, DevGalaxy, FixedCompositeGalaxy, SoftenedFracDev, GalaxyShape
from tractor.sersic import SersicIndex, SersicGalaxy
from tractor.sercore import SersicCoreGalaxy
from tractor.pointsource import PointSource
from tractor.psfex import PixelizedPsfEx, PsfExModel
from tractor.psf import HybridPixelizedPSF
# Miscellaneous science imports
from astropy.io import fits, ascii
from astropy.table import Table, Column, vstack, join
from astropy.wcs import WCS
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
import weakref
from scipy import stats
import pathos as pa
from astropy.coordinates import SkyCoord
# import sfdmap
# Local imports
from .brick import Brick
from .mosaic import Mosaic
from .utils import header_from_dict, SimpleGalaxy
from .visualization import plot_background, plot_blob, plot_blobmap, plot_brick, plot_mask
try:
import config as conf
except ImportError:
raise RuntimeError('Cannot find configuration file!')
# m = sfdmap.SFDMap(conf.SFDMAP_DIR)
# Make sure no interactive plotting is going on.
plt.ioff()
import warnings
warnings.filterwarnings("ignore")
print(
f"""
====================================================================
________ _ _______ ____ ____ ________ _______
|_ __ | / \ |_ __ \ |_ \ / _||_ __ ||_ __ \
| |_ \_| / _ \ | |__) | | \/ | | |_ \_| | |__) |
| _| / ___ \ | __ / | |\ /| | | _| _ | __ /
_| |_ _/ / \ \_ _| | \ \_ _| |_\/_| |_ _| |__/ | _| | \ \_
|_____||____| |____||____| |___||_____||_____||________||____| |___|
--------------------------------------------------------------------
M O D E L P H O T O M E T R Y W I T H T H E T R A C T O R
--------------------------------------------------------------------
(C) 2020 -- <NAME> (DAWN, University of Copenhagen)
====================================================================
CONSOLE_LOGGING_LEVEL ..... {conf.CONSOLE_LOGGING_LEVEL}
LOGFILE_LOGGING_LEVEL ..... {conf.LOGFILE_LOGGING_LEVEL}
PLOT ...................... {conf.PLOT}
NTHREADS .................. {conf.NTHREADS}
OVERWRITE ................. {conf.OVERWRITE}
"""
)
print('Starting up logging system...')
# Start the logging
import logging.config
logger = logging.getLogger('farmer')
if not len(logger.handlers):
if conf.LOGFILE_LOGGING_LEVEL is not None:
logging_level = logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL)
else:
logging_level = logging.DEBUG
logger.setLevel(logging_level)
formatter = logging.Formatter('[%(asctime)s] %(name)s :: %(levelname)s - %(message)s', '%H:%M:%S')
# Logging to the console at logging level
ch = logging.StreamHandler()
ch.setLevel(logging.getLevelName(conf.CONSOLE_LOGGING_LEVEL))
ch.setFormatter(formatter)
logger.addHandler(ch)
if (conf.LOGFILE_LOGGING_LEVEL is None) | (not os.path.exists(conf.LOGGING_DIR)):
print('Logging information will stream only to console.\n')
else:
# create file handler which logs even debug messages
logging_path = os.path.join(conf.LOGGING_DIR, 'logfile.log')
print(f'Logging information will stream to console and {logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(logging_path):
print('WARNING -- Existing logfile will be overwritten.')
os.remove(logging_path)
fh = logging.FileHandler(logging_path)
fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
fh.setFormatter(formatter)
logger.addHandler(fh)
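# With the formatter above, a record renders as, e.g.:
# [14:05:31] farmer :: INFO - Making directories!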
# When a user invokes the interface, first check the translation file
# Optionally, tell the user.
# Try to import the translate file from it's usual spot first.
try:
from translate import translate
logger.info(f'interface.translation :: Imported translate file with {len(translate.keys())} entries.')
if len(conf.BANDS) != len(translate.keys()):
logger.warning(f'Configuration file only includes {len(conf.BANDS)} entries!')
# I have nicknames in the config, I need the raw names for file I/O
mask = np.ones_like(conf.BANDS, dtype=bool)
for i, band in enumerate(conf.BANDS):
if band not in translate.keys():
logger.warning(f'Could not find {band} in translate file!')
mask[i] = False
# Re-assign bands and rawbands in config object
logger.debug(f'Assigning nicknames to raw image names:')
conf.BANDS = list(np.array(conf.BANDS)[mask])
conf.RAWBANDS = conf.BANDS.copy()
for i, band in enumerate(conf.RAWBANDS):
conf.RAWBANDS[i] = translate[band]
logger.debug(f' {i+1} :: {conf.RAWBANDS[i]} --> {conf.BANDS[i]}')
# The translation file could not be found, so make a scene.
except:
logger.warning('interface.translation :: WARNING - Could not import translate file! Will use config instead.')
logger.info('interface.translation :: Image names must be < 50 characters (FITS standard) - checking...')
# I have raw names, I need shortened raw names (i.e. nicknames)
conf.RAWBANDS = conf.BANDS.copy()
count_short = 0
for i, band in enumerate(conf.RAWBANDS):
if len(band) > 50:
conf.BANDS[i] = band[:50]
logger.debug(f' {i+1} :: {band} --> {conf.BANDS[i]}')
count_short += 1
logger.info(f'interface.translation :: Done checking. Shortened {count_short} image names.')
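# For reference, a translate file maps band nicknames onto raw image names,
# e.g. (sketch -- both names below are placeholders):
# translate = {'irac_ch1': 'SPLASH_SXDF_IRAC1_mosaic'}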
def make_directories():
"""Uses the existing config file to set up the directories. Must call from config.py directory!
"""
import pathlib
logger.info('Making directories!')
dir_dict = {'IMAGE_DIR': conf.IMAGE_DIR,
'PSF_DIR': conf.PSF_DIR,
'BRICK_DIR': conf.BRICK_DIR,
'INTERIM_DIR': conf.INTERIM_DIR,
'PLOT_DIR': conf.PLOT_DIR,
'CATALOG_DIR': conf.CATALOG_DIR,
'LOGGING_DIR': conf.LOGGING_DIR
}
for key in dir_dict.keys():
path = dir_dict[key]
if os.path.exists(path): # too important to allow overwrite...
logger.warning(f'{key} already exists under {path}!')
for i in dir_dict.keys():
if path == dir_dict[i]:
logger.info(f'{key} was already created for {i}...OK')
break
else:
logger.info(f'{key} --> {path}')
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
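# First-run usage sketch (assumes this module is importable as `interface` and
# that config.py is on the path, as checked at the top of this file):
# import interface
# interface.make_directories()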
def make_psf(image_type=conf.MULTIBAND_NICKNAME, band=None, sextractor_only=False, psfex_only=False, override=conf.OVERWRITE):
""" This is where we automatically construct the PSFs for Farmer.
Step 1. Run sextractor_only=True to obtain the PSF candidates
Step 2. Using the output plot, determine the selection box for the stars
Step 3. Run psfex_only=True to construct the PSF.
See config file to set box dimensions, psf spatial sampling, etc.
"""
# If the user asked to make a PSF for the detection image, tell them we don't do that
if image_type == conf.DETECTION_NICKNAME:
raise ValueError('Farmer does not use a PSF to perform detection!')
# Else if the user asks for a PSF to be made for the modeling band
elif image_type == conf.MODELING_NICKNAME:
# Make the Mosaic
logger.info(f'Making PSF for {conf.MODELING_NICKNAME}')
modmosaic = Mosaic(conf.MODELING_NICKNAME, modeling=True, mag_zeropoint=conf.MODELING_ZPT, skip_build=True)
# Make the PSF
logger.info(f'Mosaic loaded for {conf.MODELING_NICKNAME}')
modmosaic._make_psf(xlims=conf.MOD_REFF_LIMITS, ylims=conf.MOD_VAL_LIMITS, override=override, sextractor_only=sextractor_only, psfex_only=psfex_only)
logger.info(f'PSF made successfully for {conf.MODELING_NICKNAME}')
# Else if the user asks for a PSF in one of the bands
elif image_type == conf.MULTIBAND_NICKNAME:
# Sanity check
if band not in conf.BANDS:
raise ValueError(f'{band} is not a valid band nickname!')
# Use all bands or just one?
if band is not None:
sbands = [band,]
else:
sbands = conf.BANDS
# Loop over bands
for i, band in enumerate(sbands):
# Figure out PS selection box position and zeropoint
idx_band = np.array(conf.BANDS) == band
multi_xlims = np.array(conf.MULTIBAND_REFF_LIMITS)[idx_band][0]
multi_ylims = np.array(conf.MULTIBAND_VAL_LIMITS)[idx_band][0]
mag_zpt = np.array(conf.MULTIBAND_ZPT)[idx_band][0]
# Make the Mosaic
logger.info(f'Making PSF for {band}')
bandmosaic = Mosaic(band, mag_zeropoint = mag_zpt, skip_build=True)
# Make the PSF
logger.info(f'Mosaic loaded for {band}')
bandmosaic._make_psf(xlims=multi_xlims, ylims=multi_ylims, override=override, sextractor_only=sextractor_only, psfex_only=psfex_only)
if not sextractor_only:
logger.info(f'PSF made successfully for {band}')
else:
logger.info(f'interface.make_psf :: SExtraction complete for {band}')
return
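# The two-pass workflow from the docstring, as calls (sketch; 'hsc_i' is a
# placeholder nickname from conf.BANDS):
# make_psf(band='hsc_i', sextractor_only=True) # pass 1: gather PSF candidates
# ... inspect the output plot, then set MULTIBAND_REFF_LIMITS / MULTIBAND_VAL_LIMITS ...
# make_psf(band='hsc_i', psfex_only=True) # pass 2: build the PSF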
def make_bricks(image_type=conf.MULTIBAND_NICKNAME, band=None, brick_id=None, insert=False, skip_psf=True, max_bricks=None, make_new_bricks=False):
""" Stage 1. Here we collect the detection, modelling, and multiband images for processing. We may also cut them up!
NB: PSFs can be automatically made at this stage too, assuming you've determined your PSF selection a priori.
"""
# Make bricks for the detection image
if (image_type==conf.DETECTION_NICKNAME) | (image_type is None):
# Detection
logger.info('Making mosaic for detection...')
detmosaic = Mosaic(conf.DETECTION_NICKNAME, detection=True)
if conf.NTHREADS > 1:
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# BUGGY DUE TO MEM ALLOC
# logger.info('Making bricks for detection (in parallel)')
# pool = mp.ProcessingPool(processes=conf.NTHREADS)
# pool.map(partial(detmosaic._make_brick, detection=True, overwrite=True), np.arange(0, detmosaic.n_bricks()))
logger.info('Making bricks for detection (in serial)')
for bid in np.arange(1, detmosaic.n_bricks()+1):
detmosaic._make_brick(bid, detection=True, overwrite=True)
# Make bricks for the modeling image
elif (image_type==conf.MODELING_NICKNAME) | (image_type is None):
# Modeling
logger.info('Making mosaic for modeling...')
modmosaic = Mosaic(conf.MODELING_NICKNAME, modeling=True)
# The user wants PSFs on the fly
if not skip_psf:
mod_xlims = np.array(conf.MOD_REFF_LIMITS)
mod_ylims = np.array(conf.MOD_VAL_LIMITS)
modmosaic._make_psf(xlims=mod_xlims, ylims=mod_ylims)
# Make bricks in parallel
if (conf.NTHREADS > 1) & (brick_id is None):
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# BUGGY DUE TO MEM ALLOC
# if conf.VERBOSE: print('Making bricks for detection (in parallel)')
# pool = mp.ProcessingPool(processes=conf.NTHREADS)
# pool.map(partial(modmosaic._make_brick, detection=True, overwrite=True), np.arange(0, modmosaic.n_bricks()))
# # Make bricks in serial
# else:
if brick_id is not None:
logger.info(f'Making brick #{brick_id} for modeling (in serial)')
modmosaic._make_brick(brick_id, modeling=True, overwrite=True)
else:
logger.info('Making bricks for modeling (in serial)')
if max_bricks is None:
max_bricks = modmosaic.n_bricks()
for bid in np.arange(1, max_bricks+1):
modmosaic._make_brick(bid, modeling=True, overwrite=True)
# Make bricks for one or more multiband images
elif (image_type==conf.MULTIBAND_NICKNAME) | (image_type is None):
# One variable list
if band is not None:
if isinstance(band, str): # guard: a bare string would otherwise be iterated character by character
sbands = [band,]
elif len(band) > 0:
sbands = band
else:
sbands = conf.BANDS
else:
sbands = conf.BANDS
# In serial, loop over images
for i, sband in enumerate(sbands):
# Assume we can overwrite files unless insertion is explicit
# First image w/o insertion will make new file anyways
if make_new_bricks:
overwrite = True
if insert | (i > 0):
overwrite = False
else:
overwrite=False
# Build the mosaic
logger.info(f'Making mosaic for image {sband}...')
bandmosaic = Mosaic(sband)
# The user wants PSFs made on the fly
if not skip_psf:
idx_band = np.array(conf.BANDS) == sband
multi_xlims = np.array(conf.MULTIBAND_REFF_LIMITS)[idx_band][0]
multi_ylims = np.array(conf.MULTIBAND_VAL_LIMITS)[idx_band][0]
bandmosaic._make_psf(xlims=multi_xlims, ylims=multi_ylims)
# Make bricks in parallel
if (conf.NTHREADS > 1) & (brick_id is None):
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# logger.info(f'Making bricks for band {sband} (in parallel)')
# with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
# logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
# pool.uimap(partial(bandmosaic._make_brick, detection=False, overwrite=overwrite), np.arange(0, bandmosaic.n_bricks()))
# logger.info('Parallel processing complete.')
# Make bricks in serial
# else:
if brick_id is not None:
logger.info(f'Making brick #{brick_id} for multiband (in serial)')
bandmosaic._make_brick(brick_id, detection=False, overwrite=overwrite)
else:
logger.info(f'Making bricks for band {sband} (in serial)')
if max_bricks is None:
max_bricks = bandmosaic.n_bricks()
for bid in np.arange(1, max_bricks+1):
bandmosaic._make_brick(bid, detection=False, overwrite=overwrite)
# image type is invalid
else:
raise RuntimeError(f'{image_type} is an unrecognized nickname (see {conf.DETECTION_NICKNAME}, {conf.MODELING_NICKNAME}, {conf.MULTIBAND_NICKNAME})')
return
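# Typical Stage 1 call order (sketch; the band nickname is a placeholder):
# make_bricks(image_type=conf.DETECTION_NICKNAME)
# make_bricks(image_type=conf.MODELING_NICKNAME)
# make_bricks(image_type=conf.MULTIBAND_NICKNAME, band='hsc_i')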
def runblob(blob_id, blobs, modeling=None, catalog=None, plotting=0, source_id=None, source_only=False, blob_only=False):
""" Essentially a private function. Runs each individual blob and handles the bulk of the work. """
# if conf.NTHREADS != 0:
# fh = logging.FileHandler(f'B{blob_id}.log')
# fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
# formatter = logging.Formatter('[%(asctime)s] %(name)s :: %(levelname)s - %(message)s', '%H:%M:%S')
# fh.setFormatter(formatter)
# logger = pathos.logger(level=logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL), handler=fh)
logger = logging.getLogger(f'farmer.blob.{blob_id}')
logger.info(f'Starting on Blob #{blob_id}')
modblob = None
fblob = None
tstart = time.time()
logger.debug('Making weakref proxies of blobs')
if modeling is None:
modblob, fblob = weakref.proxy(blobs[0]), weakref.proxy(blobs[1])
elif modeling:
modblob = weakref.proxy(blobs)
else:
fblob = weakref.proxy(blobs)
logger.debug(f'Weakref made ({time.time() - tstart:3.3f})s')
# Make blob with modeling image
if modblob is not None:
logger.debug(f'Making blob with {conf.MODELING_NICKNAME}')
modblob.logger = logger
if modblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
del modblob
return catout
# If the user wants to just model a specific source...
if source_only & (source_id is not None):
logger.info(f'Preparing to model single source: {source_id}')
sid = modblob.bcatalog['source_id']
modblob.bcatalog = modblob.bcatalog[sid == source_id]
modblob.n_sources = len(modblob.bcatalog)
modblob.mids = np.ones(modblob.n_sources, dtype=int)
modblob.model_catalog = np.zeros(modblob.n_sources, dtype=object)
modblob.solution_catalog = np.zeros(modblob.n_sources, dtype=object)
modblob.solved_chisq = np.zeros(modblob.n_sources)
modblob.solved_bic = np.zeros(modblob.n_sources)
modblob.solution_chisq = np.zeros(modblob.n_sources)
modblob.tr_catalogs = np.zeros((modblob.n_sources, 3, 2), dtype=object)
modblob.chisq = np.zeros((modblob.n_sources, 3, 2))
modblob.rchisq = np.zeros((modblob.n_sources, 3, 2))
modblob.bic = np.zeros((modblob.n_sources, 3, 2))
assert(len(modblob.bcatalog) > 0)
if not blob_only:
if (conf.MODEL_PHOT_MAX_NBLOB > 0) & (modblob.n_sources > conf.MODEL_PHOT_MAX_NBLOB):
logger.info('Number of sources exceeds set limit. Skipping!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
catout['x'] += modblob.subvector[1]
catout['y'] += modblob.subvector[0]
del modblob
return catout
# Run models
if conf.ITERATIVE_SUBTRACTION_THRESH is None:
iter_thresh = 1E31
else:
iter_thresh = conf.ITERATIVE_SUBTRACTION_THRESH
if (conf.ITERATIVE_SUBTRACTION_THRESH is not None) & (modblob.n_sources >= iter_thresh):
logger.debug(f'Performing iterative subtraction for {conf.MODELING_NICKNAME}')
astart = time.time()
for i, band in enumerate(modblob.bands):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[modblob._band2idx(band_name)]
# sorting order
avg_flux = np.zeros(modblob.n_sources)
for i, item in enumerate(modblob.bcatalog):
rawfluxes = np.array([np.sum(img[modblob.segmap == item['source_id']]) for img in modblob.images])
fluxes = rawfluxes * 10**(-0.4 * (zpt - 23.9))
avg_flux[i] = np.mean(fluxes, 0)
index = np.argsort(avg_flux)[::-1] # sort by brightness
copy_images = modblob.images.copy()
import copy
modblob.solution_model_images = np.zeros_like(modblob.images)
for i, idx in enumerate(index):
logger.debug(f" ({i+1}/{modblob.n_sources}) Attemping to model source #{item['source_id']}")
itemblob = copy.deepcopy(modblob)
itemblob.bcatalog = Table(modblob.bcatalog[idx])
itemblob.n_sources = 1
itemblob.mids = np.ones(itemblob.n_sources, dtype=int)
itemblob.model_catalog = np.zeros(itemblob.n_sources, dtype=object)
itemblob.solution_catalog = np.zeros(itemblob.n_sources, dtype=object)
itemblob.solved_chisq = np.zeros(itemblob.n_sources)
itemblob.solved_bic = np.zeros(itemblob.n_sources)
itemblob.solution_chisq = np.zeros(itemblob.n_sources)
itemblob.tr_catalogs = np.zeros((itemblob.n_sources, 3, 2), dtype=object)
itemblob.chisq = np.zeros((itemblob.n_sources, 3, 2))
itemblob.rchisq = np.zeros((itemblob.n_sources, 3, 2))
itemblob.bic = np.zeros((itemblob.n_sources, 3, 2))
itemblob.images = copy_images
itemblob._is_itemblob = True
logger.debug(f'Staging images for {conf.MODELING_NICKNAME} -- blob #{modblob.blob_id}')
itemblob.stage_images()
logger.debug(f'Images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
logger.debug(f'Modeling images for {conf.MODELING_NICKNAME} -- blob #{modblob.blob_id}')
status = itemblob.tractor_phot()
if status:
logger.debug(f'Morphology determined. ({time.time() - astart:3.3f})s')
logger.debug(f'Transferring results back to parent blob...')
#transfer back
modblob.bcatalog[idx] = itemblob.bcatalog[0]
modblob.solution_model_images += itemblob.solution_model_images
# subtract model from image
copy_images -= itemblob.solution_model_images
else:
logger.warning(f'Morphology failed! ({time.time() - astart:3.3f})s')
# # if conf.NTHREADS != 0:
# # logger.removeHandler(fh)
# catout = modblob.bcatalog.copy()
# catout['x'] += modblob.subvector[1]
# catout['y'] += modblob.subvector[0]
# del modblob
# return catout
else:
astart = time.time()
logger.debug(f'Staging images for {conf.MODELING_NICKNAME}')
modblob.stage_images()
logger.debug(f'Images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
logger.debug(f'Modeling images for {conf.MODELING_NICKNAME}')
status = modblob.tractor_phot()
if not status:
logger.warning(f'Morphology failed! ({time.time() - astart:3.3f})s')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
catout['x'] += modblob.subvector[1]
catout['y'] += modblob.subvector[0]
del modblob
return catout
logger.debug(f'Morphology determined. ({time.time() - astart:3.3f})s')
# Run follow-up phot
if conf.DO_APPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual', 'weight', 'chisq',):
for band in modblob.bands:
try:
modblob.aperture_phot(band, img_type, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'Aperture photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual'):
for band in modblob.bands:
try:
modblob.sep_phot(band, img_type, centroid='MODEL', sub_background=conf.SUBTRACT_BACKGROUND)
modblob.sep_phot(band, img_type, centroid='DETECTION', sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEXPHOT:
for band in modblob.bands:
try:
modblob.residual_phot(band, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP residual photometry FAILED. Likely a bad blob.')
duration = time.time() - tstart
logger.info(f'Solution for Blob #{modblob.blob_id} (N={modblob.n_sources}) arrived at in {duration:3.3f}s ({duration/modblob.n_sources:2.2f}s per src)')
catout = modblob.bcatalog.copy()
del modblob
#################### FORCED PHOTOMETRY ################################
if fblob is not None:
# make new blob with band information
logger.debug(f'Making blob with {conf.MULTIBAND_NICKNAME}')
fblob.logger = logger
if fblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
astart = time.time()
status = fblob.stage_images()
if not status:
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
logger.info(f'{len(fblob.bands)} images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
if modblob is not None:
fblob.model_catalog = modblob.solution_catalog.copy()
fblob.position_variance = modblob.position_variance.copy()
fblob.parameter_variance = modblob.parameter_variance.copy()
logger.info(f'Solution parameters transferred. ({time.time() - astart:3.3f})s')
else:
if catalog is None:
raise ValueError('Input catalog not supplied!')
else:
blobmask = np.ones(len(catalog))
if source_id is not None:
# If the user wants to just model a specific source...
logger.info(f'Preparing to force single source: {source_id}')
sid = catalog['source_id']
bid = catalog['blob_id']
fblob.bcatalog = catalog[(sid == source_id) & (bid == blob_id)]
fblob.n_sources = len(fblob.bcatalog)
fblob.mids = np.ones(fblob.n_sources, dtype=int)
fblob.model_catalog = np.zeros(fblob.n_sources, dtype=object)
fblob.solution_catalog = np.zeros(fblob.n_sources, dtype=object)
fblob.solved_chisq = np.zeros(fblob.n_sources)
fblob.solved_bic = np.zeros(fblob.n_sources)
fblob.solution_chisq = np.zeros(fblob.n_sources)
fblob.tr_catalogs = np.zeros((fblob.n_sources, 3, 2), dtype=object)
fblob.chisq = np.zeros((fblob.n_sources, 3, 2))
fblob.rchisq = np.zeros((fblob.n_sources, 3, 2))
fblob.bic = np.zeros((fblob.n_sources, 3, 2))
assert(len(fblob.bcatalog) > 0)
else:
if blob_id is not None:
blobmask = catalog['blob_id'] == blob_id
fblob.bcatalog = catalog[blobmask]
fblob.n_sources = len(fblob.bcatalog)
catalog = catalog[blobmask]
catalog['X_MODEL'] -= fblob.subvector[1] + fblob.mosaic_origin[1] - conf.BRICK_BUFFER + 1
catalog['Y_MODEL'] -= fblob.subvector[0] + fblob.mosaic_origin[0] - conf.BRICK_BUFFER + 1
fblob.model_catalog, good_sources = models_from_catalog(catalog, fblob)
if (good_sources == False).all():
logger.warning('All sources are invalid!')
catalog['X_MODEL'] += fblob.subvector[1] + fblob.mosaic_origin[1] - conf.BRICK_BUFFER + 1
catalog['Y_MODEL'] += fblob.subvector[0] + fblob.mosaic_origin[0] - conf.BRICK_BUFFER + 1
return catalog
fblob.position_variance = None
fblob.parameter_variance = None
fblob.bcatalog = catalog[good_sources]
fblob.n_sources = len(fblob.bcatalog) # count only the sources with valid models
if fblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
# Forced phot
astart = time.time()
logger.info(f'Starting forced photometry...')
status = fblob.forced_phot()
if not status:
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
logger.info(f'Force photometry complete. ({time.time() - astart:3.3f})s')
# Run follow-up phot
if conf.DO_APPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual', 'weight', 'chisq',):
for band in fblob.bands:
try:
fblob.aperture_phot(band, img_type, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'Aperture photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.PLOT > 0:
for i, sid in enumerate(fblob.bcatalog['source_id']):
for band in fblob.bands:
fig, ax = plt.subplots()
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_image'][i], c='k', ls='dashed')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_model'][i], c='b')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_isomodel'][i], c='g')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_residual'][i], c='r')
fig.savefig(os.path.join(conf.PLOT_DIR, f'aper_{band}_{sid}.pdf'))
if conf.DO_SEPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual',):
for band in fblob.bands:
try:
fblob.sep_phot(band, img_type, centroid='MODEL', sub_background=conf.SUBTRACT_BACKGROUND)
fblob.sep_phot(band, img_type, centroid='DETECTION', sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEXPHOT:
for band in fblob.bands:
try:
fblob.residual_phot(band, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP residual photometry FAILED. Likely a bad blob.')
duration = time.time() - tstart
logger.info(f'Solution for blob {fblob.blob_id} (N={fblob.n_sources}) arrived at in {duration:3.3f}s ({duration/fblob.n_sources:2.2f}s per src)')
catout = fblob.bcatalog.copy()
del fblob
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
return catout
def detect_sources(brick_id, catalog=None, segmap=None, blobmap=None, use_mask=True):
"""Now we can detect stuff and be rid of it!
Parameters
----------
brick_id : [type]
[description]
catalog : [type], optional
[description], by default None
segmap : [type], optional
[description], by default None
blobmap : [type], optional
[description], by default None
catalog : [type], optional
[description], by default None
use_mask : bool, optional
[description], by default True
Returns
-------
[type]
[description]
Raises
------
RuntimeError
[description]
ValueError
[description]
ValueError
[description]
ValueError
[description]
"""
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_logfile.log")
logging.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logging.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# Create detection brick
tstart = time.time()
detbrick = stage_brickfiles(brick_id, nickname=conf.DETECTION_NICKNAME, modeling=True, is_detection=True)
if detbrick is None:
return
logger.info(f'Detection brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Sextract sources
tstart = time.time()
if (segmap is None) & (catalog is None):
try:
detbrick.sextract(conf.DETECTION_NICKNAME, sub_background=conf.DETECTION_SUBTRACT_BACKGROUND, use_mask=use_mask, incl_apphot=conf.DO_APPHOT)
logger.info(f'Detection brick #{brick_id} sextracted {detbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
detbrick.is_borrowed = False
except:
raise RuntimeError(f'Detection brick #{brick_id} sextraction FAILED. ({time.time() - tstart:3.3f}s)')
return
# or find existing catalog/segmap info
elif (catalog == 'auto') | ((segmap is not None) & (catalog is not None) & (blobmap is not None)):
if (catalog == 'auto'):
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
if os.path.exists(search_fn):
catalog = Table(fits.open(search_fn)[1].data)
else:
raise ValueError(f'No valid catalog was found for {brick_id}')
logger.info(f'Overriding SExtraction with external catalog. ({search_fn})')
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
segmap = hdul_seg['SEGMAP'].data
blobmap = hdul_seg['BLOBMAP'].data
else:
raise ValueError(f'No valid segmentation map was found for {brick_id}')
if conf.X_COLNAME != 'x':
if 'x' in catalog.colnames:
if 'x_borrowed' in catalog.colnames:
catalog.remove_column('x_borrowed')
catalog['x'].name = 'x_borrowed'
catalog[conf.X_COLNAME].name = 'x'
if conf.Y_COLNAME != 'y':
if 'y' in catalog.colnames:
if 'y_borrowed' in catalog.colnames:
catalog.remove_column('y_borrowed')
catalog['y'].name = 'y_borrowed'
catalog[conf.Y_COLNAME].name = 'y'
# catalog['x'] = catalog['x'] - detbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - detbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
detbrick.catalog = catalog
detbrick.n_sources = len(catalog)
detbrick.n_blobs = len(np.unique(catalog['blob_id']))
detbrick.is_borrowed = True
detbrick.segmap = segmap
detbrick.segmask = segmap.copy()
detbrick.segmask[segmap!=0] = 1
detbrick.blobmap = blobmap
else:
raise ValueError('No valid segmap, blobmap, and catalog provided to override SExtraction!')
if not detbrick.is_borrowed:
detbrick.cleanup()
if conf.PLOT > 2:
plot_blobmap(detbrick, image=detbrick.images[0], band=conf.DETECTION_NICKNAME, mode='log')
plot_blobmap(detbrick, image=detbrick.images[0], band=conf.DETECTION_NICKNAME, mode='rms')
logger.info('Saving detection catalog...')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_{conf.DETECTION_NICKNAME}.fits')
tstart = time.time()
if os.path.exists(outpath) and not conf.OVERWRITE:
logger.warning('Catalog file exists and I will not overwrite it!')
else:
detbrick.catalog.write(outpath, overwrite=conf.OVERWRITE)
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
# Save segmap and blobmaps
# if (~detbrick.is_borrowed):
tstart = time.time()
logger.info('Saving segmentation and blob maps...')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(outpath) and not conf.OVERWRITE:
logger.warning('Segmentation file exists and I will not overwrite it!')
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU(data=detbrick.segmap, name='SEGMAP', header=detbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=detbrick.blobmap, name='BLOBMAP', header=detbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
tstart = time.time()
# else:
# logger.info(f'You gave me a catalog and segmap, so I am not saving it again.')
# filen = open(os.path.join(conf.INTERIM_DIR, f'detbrick_N{brick_id}.pkl'), 'wb')
# dill.dump(detbrick, filen)
return detbrick
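# Usage sketch (hypothetical brick id; assumes the configured directories exist):
#   detbrick = detect_sources(1)                  # fresh SExtraction
#   detbrick = detect_sources(1, catalog='auto')  # borrow B1.cat + B1_SEGMAPS.fits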
def make_models(brick_id, detbrick='auto', band=None, source_id=None, blob_id=None, multiband_model=len(conf.MODELING_BANDS)>1, source_only=False):
""" Stage 2. Detect your sources and determine the best model parameters for them """
if (band is None) & (len(conf.MODELING_BANDS) > 0):
modband = conf.MODELING_BANDS
addName = conf.MULTIBAND_NICKNAME
multiband_model = True
if (type(modband) == str) | (type(modband) == np.str_):
modband = [modband,]
else:
logger.warning(f'Disregarding MODELING_BANDS config parameter. Using {band} for modelling instead!')
if (type(band) == list) | (type(band) == np.ndarray):
multiband_model = True
modband = band
elif (type(band) == str) | (type(band) == np.str_):
multiband_model = False
modband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
addName = '_'.join(modband)
# create new logging file
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_{addName}_logfile.log")
logger.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logger.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# Warn user that you cannot plot while running multiprocessing...
if (source_id is None) & (blob_id is None):
if (conf.NBLOBS == 0) & (conf.NTHREADS > 1) & (conf.PLOT > 0):
conf.PLOT = 0
logger.warning('Plotting not supported while modeling in parallel!')
if detbrick == 'auto':
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_{conf.DETECTION_NICKNAME}.fits')
if os.path.exists(outpath):
logger.info(f'Loading in catalog from {outpath}')
catalog = Table.read(outpath)
n_blobs = len(np.unique(catalog['blob_id']))
n_sources = len(catalog)
else:
raise RuntimeError(f'Catalog was not found at {outpath}')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(outpath):
logger.info(f'Loading in segmaps from {outpath}')
hdul = fits.open(outpath)
segmap = hdul['SEGMAP'].data
segmask = segmap.copy()
segmask[segmap > 0] = 1
blobmap = hdul['BLOBMAP'].data
else:
raise RuntimeError(f'Segmaps were not found at {outpath}')
# filen = open(os.path.join(conf.INTERIM_DIR, f'detbrick_N{brick_id}.pkl'), 'rb')
# detbrick = dill.load(filen)
# Create modbrick
if band is None:
if not multiband_model:
img_names = [conf.MODELING_NICKNAME,]
mod_nickname = conf.MODELING_NICKNAME
elif multiband_model:
img_names = conf.MODELING_BANDS
for iname in img_names:
if iname not in conf.BANDS:
raise ValueError(f'{iname} is listed as a band to model, but is not found in conf.BANDS!')
mod_nickname = conf.MULTIBAND_NICKNAME
else:
if type(band) == list:
img_names = band
else:
img_names = [band,]
mod_nickname = conf.MULTIBAND_NICKNAME
# Loop over bands to do the modelling on -- if model in series!
eff_area = None
if not multiband_model:
for band_num, mod_band in enumerate(img_names):
tstart = time.time()
modbrick = stage_brickfiles(brick_id, band=mod_band, nickname=mod_nickname, modeling=(modband[band_num] not in conf.BANDS))
# catalog['x'] = catalog['x'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
if modbrick is None:
return
if (band is not None) & (band != conf.MODELING_NICKNAME):
modbrick.bands = [f'{conf.MODELING_NICKNAME}_{mod_band}',]
modbrick.n_bands = len(modbrick.bands)
else:
mod_band = conf.MODELING_NICKNAME
logger.info(f'Modeling brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Inform the user about the blob occupation distribution
logger.info('Blob Occupation Distribution')
for i in np.arange(5)+1:
n_blob = np.sum(catalog['N_BLOB'] == i)
logger.info(f' {i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
n_blob = np.sum(catalog['N_BLOB'] > i)
logger.info(f' >{i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
if conf.PLOT > 3:
plot_brick(modbrick, 0, band=mod_band)
plot_background(modbrick, 0, band=mod_band)
plot_mask(modbrick, 0, band=mod_band)
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(modbrick.bands):
hdul.append(fits.ImageHDU(data=modbrick.background_images[m], name=f'BACKGROUND_{mband}', header=modbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = modbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = modbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = modbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = modbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=modbrick.background_rms_images[m], name=f'RMS_{mband}', header=modbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(modbrick.weights[m]), name=f'UNC_{mband}', header=modbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
logger.debug(f'Brick #{brick_id} -- Image statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.images[0], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.weights[0], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {mod_band}')
ok = modbrick.weights[0] > 0
shape, minmax, mean, var = stats.describe(1/np.sqrt(modbrick.weights[0][ok].flatten()), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {mod_band}')
logger.debug(f' Global: {modbrick.backgrounds[0, 0]:6.6f}')
logger.debug(f' RMS: {modbrick.backgrounds[0, 1]:6.6f}\n')
modbrick.catalog = catalog.copy()
modbrick.segmap = segmap
modbrick.n_sources = n_sources
modbrick.is_modeling = True
modbrick.blobmap = blobmap
modbrick.n_blobs = n_blobs
modbrick.segmask = segmask
# Transfer to MODBRICK
tstart = time.time()
if band_num > 0:
modbrick.n_blobs, modbrick.n_sources, modbrick.segmap, modbrick.segmask, modbrick.blobmap, modbrick.catalog = n_blobs, n_sources, segmap, segmask, blobmap, catalog
if modbrick.n_blobs <= 0:
logger.critical(f'Modeling brick #{brick_id} gained {modbrick.n_blobs} blobs! Quitting.')
return
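# With the catalog attached, re-derive the pixel weights and background
# solutions before any blobs are cut out.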
modbrick.run_weights()
modbrick.run_background()
modbrick.add_columns(modbrick_name=mod_band, multiband_model = False) # doing on detbrick gets column names wrong
logger.info(f'Modeling brick #{brick_id} gained {modbrick.n_blobs} blobs with {modbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
# Run a specific source or blob
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
outcatalog = modbrick.catalog.copy()
# print('AHHHHH ', outcatalog['x', 'y'])
mosaic_origin = modbrick.mosaic_origin
# print('MOSAIC ORIGIN ', mosaic_origin)
brick_id = modbrick.brick_id
if source_id is not None:
blob_id = np.unique(modbrick.blobmap[modbrick.segmap == source_id])
if len(blob_id) == 1:
blob_id = blob_id[0]
else:
raise ValueError('Requested source is not in brick!')
if blob_id is not None:
if blob_id not in outcatalog['blob_id']:
raise ValueError(f'No blobs exist for requested blob id {blob_id}')
logger.info(f'Running single blob {blob_id}')
modblob = modbrick.make_blob(blob_id)
modblob.is_modeling = True
# if source_id is set, then look at only that source
if modblob.rejected:
raise ValueError('Requested blob is invalid')
if source_only & (source_id not in modblob.bcatalog['source_id']):
logger.warning('Requested source is not in blob!')
for source in modblob.bcatalog:
logger.warning(f"{source['source_id']} {source['cflux']}")
raise ValueError('Requested source is not in blob!')
output_rows = runblob(blob_id, modblob, modeling=True, plotting=conf.PLOT, source_id=source_id, source_only=source_only)
output_cat = vstack(output_rows)
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
shape = np.shape(output_cat[colname][0])
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Else, production mode -- all objects in brick are to be run.
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
else:
run_n_blobs = modbrick.n_blobs
logger.info(f'Preparing to run {run_n_blobs} blobs.')
outcatalog = modbrick.catalog.copy()
mosaic_origin = modbrick.mosaic_origin
brick_id = modbrick.brick_id
logger.info('Generating blobs...')
astart = time.time()
modblobs = (modbrick.make_blob(i) for i in np.arange(1, run_n_blobs+1))
logger.info(f'{run_n_blobs} blobs generated ({time.time() - astart:3.3f}s)')
#del modbrick
tstart = time.time()
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
result = pool.uimap(partial(runblob, modeling=True, plotting=conf.PLOT, source_only=source_only), np.arange(1, run_n_blobs+1), modblobs)
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
logger.info('Serial processing initialized.')
output_rows = [runblob(kblob_id+1, kblob, modeling=True, plotting=conf.PLOT, source_only=source_only) for kblob_id, kblob in enumerate(modblobs)]
output_cat = vstack(output_rows)
# Estimate covariance
modbrick.bcatalog = output_cat
astart = time.time()
logger.info(f'Starting covariance estimation...')
# positions and shapes are free parameters during modeling, so neither is taken from a band
status = modbrick.estimate_error_corr(use_band_position=False, use_band_shape=False, modeling=True)
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f}s)')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = dict(zip(img_names, np.zeros(len(img_names))))
for b, bname in enumerate(img_names):
eff_area[bname] = modbrick.estimate_effective_area(output_cat, bname, modeling=True)[0]
ttotal = time.time() - tstart
logger.info(f'Completed {run_n_blobs} blobs with {len(output_cat)} sources in {ttotal:3.3f}s (avg. {ttotal/len(output_cat):2.2f}s per source)')
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
shape = np.shape(output_cat[colname][0])
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# open again and add
# If user wants model and/or residual images made:
if conf.MAKE_RESIDUAL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{modbrick.bands[0]}']]
modbrick.make_residual_image(catalog=cleancatalog, use_band_position=False, modeling=True)
elif conf.MAKE_MODEL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{modbrick.bands[0]}']]
modbrick.make_model_image(catalog=cleancatalog, use_band_position=False, modeling=True)
# Reconstruct mosaic positions of invalid sources
invalid = ~modbrick.catalog[f'VALID_SOURCE_{modbrick.bands[0]}']
# modbrick.catalog[invalid][f'X_MODEL_{modbrick.bands[0]}'] = modbrick.catalog[invalid]['x_orig'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
# modbrick.catalog[invalid][f'Y_MODEL_{modbrick.bands[0]}'] = modbrick.catalog[invalid]['y_orig'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
# print(np.sum(invalid), len(invalid))
# plt.pause(10)
# idx = np.argwhere(invalid)[:20]
# print(modbrick.catalog[idx][f'X_MODEL_{modbrick.bands[0]}'], np.array(modbrick.catalog[idx]['x_orig']) + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER)
# if multiband model is enabled...
elif multiband_model:
tstart = time.time()
modbrick = stage_brickfiles(brick_id, band=img_names, nickname=mod_nickname, modeling=True)
if modbrick is None:
return
# if detbrick.is_borrowed:
# catalog['x'] = catalog['x'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
modbrick.bands = [f'{conf.MODELING_NICKNAME}_{b}' for b in img_names]
modbrick.n_bands = len(modbrick.bands)
logger.info(f'Multi-band Modeling brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Inform the user about the blob occupation distribution
logger.info('Blob Occupation Distribution')
__, idx = np.unique(catalog['blob_id'], return_index=True)
for i in np.arange(5)+1:
n_blob = np.sum(catalog['N_BLOB'][idx] == i)
logger.info(f' {i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
n_blob = np.sum(catalog['N_BLOB'][idx] > i)
logger.info(f' >{i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
for i, mod_band in enumerate(modbrick.bands):
if conf.PLOT > 3:
plot_brick(modbrick, 0, band=mod_band)
plot_background(modbrick, 0, band=mod_band)
plot_mask(modbrick, 0, band=mod_band)
logger.debug(f'Brick #{brick_id} -- Image statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.images[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.weights[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {mod_band}')
ok = modbrick.weights[i] > 0
shape, minmax, mean, var = stats.describe(1/np.sqrt(modbrick.weights[i][ok].flatten()), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {mod_band}')
logger.debug(f' Global: {modbrick.backgrounds[i, 0]:6.6f}')
logger.debug(f' RMS: {modbrick.backgrounds[i, 1]:6.6f}\n')
modbrick.catalog = catalog.copy()
modbrick.segmap = segmap
modbrick.n_sources = n_sources
modbrick.is_modeling = True
modbrick.blobmap = blobmap
modbrick.n_blobs = n_blobs
modbrick.segmask = segmask
# Cleanup on MODBRICK
tstart = time.time()
modbrick.shared_params = True ## CRITICAL THING TO DO HERE!
modbrick.add_columns(multiband_model=True) # doing on detbrick gets column names wrong
logger.info(f'Modeling brick #{brick_id} has {modbrick.n_blobs} blobs with {modbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
modbrick.run_weights()
modbrick.run_background()
if conf.PLOT > 3:
plot_blobmap(modbrick)
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(modbrick.bands):
hdul.append(fits.ImageHDU(data=modbrick.background_images[m], name=f'BACKGROUND_{mband}', header=modbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = modbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = modbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = modbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = modbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=modbrick.background_rms_images[m], name=f'RMS_{mband}', header=modbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(modbrick.weights[m]), name=f'UNC_{mband}', header=modbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
# Run a specific source or blob
blob_only = False
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
outcatalog = modbrick.catalog.copy()
# print('AHHHHH ', outcatalog['x', 'y'])
mosaic_origin = modbrick.mosaic_origin
# print('MOSAIC ORIGIN ', mosaic_origin)
brick_id = modbrick.brick_id
if source_id is not None:
blob_id = np.unique(modbrick.blobmap[modbrick.segmap == source_id])
if len(blob_id) == 1:
blob_id = blob_id[0]
else:
raise ValueError('Requested source is not in brick!')
if blob_id is not None:
if blob_id not in outcatalog['blob_id']:
raise ValueError(f'No blobs exist for requested blob id {blob_id}')
blob_only=True
logger.info(f'Running single blob {blob_id}')
modblob = modbrick.make_blob(blob_id)
if modblob.rejected:
raise ValueError('Requested blob is invalid')
output_rows = runblob(blob_id, modblob, modeling=True, plotting=conf.PLOT, source_id=source_id, blob_only=blob_only, source_only=source_only)
output_cat = vstack(output_rows)
# Estimate covariance
modbrick.bcatalog = output_cat
# astart = time.time()
# logger.info(f'Starting covariance estimation...')
# status = modbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=True)
# logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
colshape = np.shape(output_cat[colname][0])
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Else, production mode -- all objects in brick are to be run.
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
bid_arr = np.arange(1, run_n_blobs+1)
elif conf.MODEL_PHOT_MAX_NBLOB > 0:
bid_arr = np.unique(modbrick.catalog['blob_id'][modbrick.catalog['N_BLOB'] <= conf.MODEL_PHOT_MAX_NBLOB])
run_n_blobs = len(bid_arr)
if conf.NBLOBS > 0:
bid_arr = bid_arr[:conf.NBLOBS]
run_n_blobs = len(bid_arr)
else:
run_n_blobs = modbrick.n_blobs
bid_arr = np.arange(1, run_n_blobs+1)
logger.info(f'Preparing to run {run_n_blobs}/{modbrick.n_blobs} blobs.')
outcatalog = modbrick.catalog.copy()
mosaic_origin = modbrick.mosaic_origin
brick_id = modbrick.brick_id
logger.info('Generating blobs...')
astart = time.time()
modblobs = (modbrick.make_blob(i) for i in bid_arr)
logger.info(f'{run_n_blobs} blobs generated ({time.time() - astart:3.3f}s)')
#del modbrick
tstart = time.time()
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
result = pool.uimap(partial(runblob, modeling=True, plotting=conf.PLOT), bid_arr, modblobs)
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
logger.info('Serial processing initialized.')
output_rows = [runblob(kblob_id+1, kblob, modeling=True, plotting=conf.PLOT) for kblob_id, kblob in enumerate(modblobs)]
output_cat = vstack(output_rows)
ttotal = time.time() - tstart
logger.info(f'Completed {run_n_blobs} blobs with {len(output_cat)} sources in {ttotal:3.3f}s (avg. {ttotal/len(output_cat):2.2f}s per source)')
# Estimate covariance
modbrick.bcatalog = output_cat
# astart = time.time()
# logger.info(f'Starting covariance estimation...')
# status = modbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=True)
# logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = dict(zip(img_names, np.zeros(len(img_names))))
for b, bname in enumerate(img_names):
eff_area[bname] = modbrick.estimate_effective_area(output_cat, bname, modeling=True)[0]
else:
eff_area = None
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
colshape = np.shape(output_cat[colname])
if len(colshape) == 2:
colshape = (colshape[1],)
else:
colshape = (1,)
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Reconstruct mosaic positions of invalid sources
invalid = ~modbrick.catalog['VALID_SOURCE']
modbrick.catalog['x'] = modbrick.catalog['x'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
modbrick.catalog['y'] = modbrick.catalog['y'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
modbrick.catalog['x_orig'] = modbrick.catalog['x_orig'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
modbrick.catalog['y_orig'] = modbrick.catalog['y_orig'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
# If model bands is more than one, choose best one
# Choose based on min chisq
if (len(img_names) > 1) and not multiband_model:
logger.info(f'Selecting best-fit models within {len(img_names)} bands')
name_arr = np.ones(shape=(len(modbrick.catalog), len(img_names)), dtype='U11')
score_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
valid_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
xmodel_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
ymodel_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
for i, mod_band in enumerate(img_names):
name_arr[:, i] = mod_band
score_arr[:, i] = modbrick.catalog[f'CHISQ_{conf.MODELING_NICKNAME}_{mod_band}']
xmodel_arr[:, i] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}_{mod_band}']
ymodel_arr[:, i] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}_{mod_band}']
valid_arr[:, i] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}_{mod_band}']
score_arr[np.logical_not(valid_arr[:,i]), i] = 1E31
argmin_score = np.argmin(score_arr, 1)
argmin_zero = np.min(score_arr, 1) == 1E31
argmin_zero = np.zeros_like(argmin_zero)
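# argmin_zero is zeroed out above, so the no-valid-model mask below is
# effectively disabled and every source gets a best band assigned.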
modbrick.catalog['BEST_MODEL_BAND'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(name_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['X_MODEL'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(xmodel_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['Y_MODEL'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(ymodel_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['VALID_SOURCE'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(valid_arr[~argmin_zero], argmin_score[~argmin_zero])]
# if modbrick.wcs is not None:
# skyc = self.brick_wcs.all_pix2world(modbrick.catalog[f'X_MODEL'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER, modbrick.catalog[f'Y_MODEL'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER, 0)
# modbrick.bcatalog[row][f'RA'] = skyc[0]
# modbrick.bcatalog[row][f'DEC'] = skyc[1]
# logger.info(f" Sky Model RA, Dec: {skyc[0]:6.6f} deg, {skyc[1]:6.6f} deg")
elif (len(img_names) > 1) & multiband_model:
modbrick.catalog['BEST_MODEL_BAND'] = conf.MODELING_NICKNAME
# modbrick.catalog['X_MODEL']
# modbrick.catalog['Y_MODEL'] # ???? WHAT
# modbrick.catalog['VALID_SOURCE']
elif img_names[0] != conf.MODELING_NICKNAME:
modbrick.catalog['BEST_MODEL_BAND'] = f'{conf.MODELING_NICKNAME}_{img_names[0]}'
modbrick.catalog['X_MODEL'] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}_{img_names[0]}']
modbrick.catalog['Y_MODEL'] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}_{img_names[0]}']
modbrick.catalog['VALID_SOURCE'] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}_{img_names[0]}']
else:
modbrick.catalog['BEST_MODEL_BAND'] = f'{conf.MODELING_NICKNAME}'
modbrick.catalog['X_MODEL'] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}']
modbrick.catalog['Y_MODEL'] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}']
modbrick.catalog['VALID_SOURCE'] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']
# write out cat
if conf.OUTPUT:
hdr = header_from_dict(conf.__dict__)
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in img_names:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b}', eff_area_deg, f'{conf.MODELING_NICKNAME} {band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(modbrick.catalog)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
outpath = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
hdul.writeto(outpath, output_verify='ignore', overwrite=conf.OVERWRITE)
logger.info(f'Wrote out catalog to {outpath}')
# If user wants model and/or residual images made:
if conf.MAKE_RESIDUAL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']]
modbrick.make_residual_image(catalog=cleancatalog, use_band_position=False, modeling=True)
elif conf.MAKE_MODEL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']]
modbrick.make_model_image(catalog=cleancatalog, use_band_position=False, modeling=True)
# close the brick_id specific file handlers
if conf.LOGFILE_LOGGING_LEVEL is not None:
new_fh.close()
logger.removeHandler(new_fh)
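# Usage sketch (hypothetical brick id and band name):
#   make_models(1)                # model with conf.MODELING_BANDS
#   make_models(1, band='hsc_i')  # model a single, hypothetical band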
def force_photometry(brick_id, band=None, source_id=None, blob_id=None, insert=False, source_only=False, unfix_bandwise_positions=(not conf.FREEZE_FORCED_POSITION), unfix_bandwise_shapes=(not conf.FREEZE_FORCED_SHAPE), rao_cramer_only=False):
if band is None:
fband = conf.BANDS
addName = conf.MULTIBAND_NICKNAME
else:
if (type(band) == list) | (type(band) == np.ndarray):
fband = band
elif (type(band) == str) | (type(band) == np.str_):
fband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
addName = '_'.join(fband)
# create new logging file
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_{addName}_logfile.log")
logger.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logger.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# TODO Check if the catalog will be too big...
if ((not unfix_bandwise_positions) & (not unfix_bandwise_shapes)) | (len(fband) == 1):
force_models(brick_id=brick_id, band=band, source_id=source_id, blob_id=blob_id, insert=insert, source_only=source_only, force_unfixed_pos=False, use_band_shape=unfix_bandwise_shapes, rao_cramer_only=rao_cramer_only)
else:
if conf.FREEZE_FORCED_POSITION:
logger.warning('Setting FREEZE_FORCED_POSITION to False!')
conf.FREEZE_FORCED_POSITION = False
for b in fband:
tstart = time.time()
logger.critical(f'Running Forced Photometry on {b}')
if rao_cramer_only:
logger.critical('WARNING -- ONLY COMPUTING RAO-CRAMER FLUX ERRORS! THIS IS NOT A NORMAL MODE!')
logger.critical('ENSURE PLOTTING IS TURNED OFF!!!')
force_models(brick_id=brick_id, band=b, source_id=source_id, blob_id=blob_id, insert=insert, source_only=source_only, force_unfixed_pos=True, use_band_shape=unfix_bandwise_shapes, rao_cramer_only=rao_cramer_only)
logger.critical(f'Forced Photometry for {b} finished in {time.time() - tstart:3.3f}s')
# TODO -- compare valid source_band and add to catalog!
if conf.PLOT > 0: # COLLECT SRCPROFILES
logger.info('Collecting srcprofile diagnostic plots...')
if (blob_id is None) & (source_id is None):
import glob
# find sids
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B*_S*_*_srcprofile.pdf'))
sids = []
for f in files:
tsid = int(f[len(conf.PLOT_DIR):].split('S')[1].split('_')[0])
if tsid not in sids:
sids.append(tsid)
for sid in sids:
logger.debug(f' * source {sid}')
fnames = []
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B*_S{sid}_*_srcprofile.pdf'))
if len(files) == 0:
logger.error(f'Source {sid} does not have any srcprofile plots to collect!')
return
bid = int(files[0][len(conf.PLOT_DIR):].split('B')[1].split('_')[0])
for b in fband:
logger.debug(f' *** adding {b}')
fname = os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_{b}_srcprofile.pdf')
if os.path.exists(fname):
fnames.append(fname)
else:
logger.warning(f' *** {b} does not exist at {fname}')
# collect
from PyPDF2 import PdfFileMerger
merger = PdfFileMerger()
for pdf in fnames:
merger.append(pdf)
logger.debug(f'Writing out combined srcprofile...')
merger.write(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_srcprofile.pdf'))
merger.close()
# remove
logger.debug(f'Removing individual srcprofiles...')
for fname in fnames:
os.remove(fname)
else:
import glob
# find sids
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{blob_id}_S*_*_srcprofile.pdf'))
sids = []
for f in files:
tsid = int(f[len(conf.PLOT_DIR):].split('S')[1].split('_')[0])
if tsid not in sids:
sids.append(tsid)
for sid in sids:
logger.debug(f' * source {sid}')
fnames = []
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{blob_id}_S{sid}_*_srcprofile.pdf'))
if len(files) == 0:
logger.error(f'Source {sid} does not have any srcprofile plots to collect!')
return
bid = int(files[0][len(conf.PLOT_DIR):].split('B')[1].split('_')[0])
for b in fband:
logger.debug(f' *** adding {b}')
fname = os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_{b}_srcprofile.pdf')
if os.path.exists(fname):
fnames.append(fname)
else:
logger.warning(f' *** {b} does not exist at {fname}')
# collect
from PyPDF2 import PdfFileMerger
merger = PdfFileMerger()
for pdf in fnames:
merger.append(pdf)
logger.debug(f'Writing out combined srcprofile...')
merger.write(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_srcprofile.pdf'))
merger.close()
# remove
logger.debug(f'Removing individual srcprofiles...')
for fname in fnames:
os.remove(fname)
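# Usage sketch (hypothetical brick id and band name):
#   force_photometry(1)                   # force models on all conf.BANDS
#   force_photometry(1, band='irac_ch1')  # force a single, hypothetical band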
def force_models(brick_id, band=None, source_id=None, blob_id=None, insert=True, source_only=False, force_unfixed_pos=(not conf.FREEZE_FORCED_POSITION), use_band_shape=(not conf.FREEZE_FORCED_SHAPE), rao_cramer_only=False):
""" Stage 3. Force the models on the other images and solve only for flux. """
# Create and update multiband brick
tstart = time.time()
eff_area = None
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
if (source_id is None) & (blob_id is None):
if (conf.NBLOBS == 0) & (conf.NTHREADS > 1) & (conf.PLOT > 0):
conf.PLOT = 0
logger.warning('Plotting not supported while forcing models in parallel!')
if band is None:
fband = conf.BANDS
else:
if (type(band) == list) | (type(band) == np.ndarray):
fband = band
elif (type(band) == str) | (type(band) == np.str_):
fband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
fbrick = stage_brickfiles(brick_id, nickname=conf.MULTIBAND_NICKNAME, band=fband, modeling=False)
if fbrick is None:
return
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
if os.path.exists(search_fn):
fbrick.catalog = Table(fits.open(search_fn)[1].data)
fbrick.n_sources = len(fbrick.catalog)
fbrick.n_blobs = len(np.unique(fbrick.catalog['blob_id']))
else:
logger.critical(f'No valid catalog was found for {brick_id}')
return
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
fbrick.segmap = hdul_seg['SEGMAP'].data
fbrick.blobmap = hdul_seg['BLOBMAP'].data
fbrick.segmask = fbrick.segmap.copy()
fbrick.segmask[fbrick.segmap>0] = 1
else:
logger.critical(f'No valid segmentation map was found for {brick_id}')
return
if (~fbrick.catalog['VALID_SOURCE_MODELING']).all():
logger.critical(f'All sources in brick #{brick_id} are invalid. Quitting!')
return
uniq_src, index_src = np.unique(fbrick.catalog['source_id'], return_index=True)
if len(uniq_src) != len(fbrick.catalog):
n_nonuniq = len(fbrick.catalog) - len(uniq_src)
logger.warning(f'Removing {n_nonuniq} non-unique sources from catalog!')
fbrick.catalog = fbrick.catalog[index_src]
if not rao_cramer_only:
fbrick.add_columns(modeling=False)
else:
filler = np.zeros(len(fbrick.catalog))
for colname in fbrick.bands:
colname = colname.replace(' ', '_')
fbrick.catalog.add_column(Column(filler, name=f'RAW_DIRECTFLUX_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'RAW_DIRECTFLUXERR_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'DIRECTFLUX_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'DIRECTFLUXERR_{colname}'))
fbrick.run_background()
fbrick.run_weights()
logger.info(f'{conf.MULTIBAND_NICKNAME} brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
if conf.PLOT > 3:
for plt_band in fband:
if (len(fband) == 1) | force_unfixed_pos:
idx = 0
else:
idx = np.argwhere(np.array(fband)==plt_band)[0][0]
plot_brick(fbrick, idx, band=plt_band)
plot_background(fbrick, idx, band=plt_band)
plot_mask(fbrick, idx, band=plt_band)
fcat = fbrick.catalog.copy()
fcat['x'] -= fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1
fcat['y'] -= fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1
plot_blobmap(fbrick, image=fbrick.images[idx], band=plt_band, catalog=fcat)
for i, vb_band in enumerate(fband):
logger.debug(f'Brick #{brick_id} -- Image statistics for {vb_band}')
shape, minmax, mean, var = stats.describe(fbrick.images[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {vb_band}')
ok = fbrick.weights[i] > 0
shape, minmax, mean, var = stats.describe(fbrick.weights[i][ok].flatten(), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {vb_band}')
shape, minmax, mean, var = stats.describe(1/np.sqrt(fbrick.weights[i][ok].flatten()), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {vb_band}')
logger.debug(f' Global: {fbrick.backgrounds[i, 0]:6.6f}')
logger.debug(f' RMS: {fbrick.backgrounds[i, 1]:6.6f}')
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(fbrick.bands):
hdul.append(fits.ImageHDU(data=fbrick.background_images[m], name=f'BACKGROUND_{mband}', header=fbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = fbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = fbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = fbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = fbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=fbrick.background_rms_images[m], name=f'RMS_{mband}', header=fbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(fbrick.weights[m]), name=f'UNC_{mband}', header=fbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
logger.info(f'Forcing models on {len(fband)} {conf.MULTIBAND_NICKNAME} bands')
# if conf.FORCE_SHARE_PARAMS:
# fbrick.shared_params = True
tstart = time.time()
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
if source_id is not None:
blob_id = np.unique(fbrick.blobmap[fbrick.segmap == source_id])
assert(len(blob_id) == 1)
blob_id = blob_id[0]
fblob = fbrick.make_blob(blob_id)
if source_only & (source_id not in fbrick.catalog['source_id']):
logger.warning('Requested source is not in blob!')
for source in fbrick.catalog:
logger.debug(f"{source['source_id']} {source['cflux']}")
raise ValueError('Requested source is not in blob!')
if rao_cramer_only:
output_rows = runblob_rc(blob_id, fblob, catalog=fbrick.catalog, source_id=source_id)
else:
output_rows = runblob(blob_id, fblob, modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT, source_id=source_id)
output_cat = vstack(output_rows)
fbrick.bcatalog = output_cat
# Estimate covariance
astart = time.time()
logger.info(f'Starting covariance estimation...')
status = fbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f}s)')
if not conf.OUTPUT:
logging.warning('OUTPUT is DISABLED! Quitting...')
else:
if insert & conf.OVERWRITE & (conf.NBLOBS==0):
# open old cat
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
# make fillers
for colname in np.array(output_cat.colnames)[newcols]:
#mastercat.add_column(output_cat[colname])
if colname not in mastercat.colnames:
if np.ndim(output_cat[colname]) > 1:
shape = np.shape(output_cat[colname][0])
else:
shape = 1
mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=shape, name=colname))
for row in output_cat:
mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]] = row
# coordinate correction
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
# save
mastercat.write(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat'), format='fits', overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
else:
for colname in output_cat.colnames:
if colname not in fbrick.catalog.colnames:
if np.ndim(output_cat[colname]) > 1:
shape = np.shape(output_cat[colname][0])
else:
shape = 1
fbrick.catalog.add_column(Column(length=len(fbrick.catalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#fbrick.catalog = join(fbrick.catalog, output_cat, join_type='left', )
for row in output_cat:
fbrick.catalog[np.where(fbrick.catalog['source_id'] == row['source_id'])[0]] = row
mode_ext = conf.MULTIBAND_NICKNAME
if fband is not None:
if len(fband) == 1:
mode_ext = fband[0].replace(' ', '_')
else:
mode_ext = conf.MULTIBAND_NICKNAME
# write out cat
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
if conf.OUTPUT:
fbrick.catalog.write(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{mode_ext}.cat'), format='fits', overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new {mode_ext} catalog file.')
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
else:
run_n_blobs = fbrick.n_blobs
fblobs = (fbrick.make_blob(i) for i in np.unique(fbrick.catalog['blob_id'].data))
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
if rao_cramer_only:
result = pool.uimap(partial(runblob_rc, catalog=fbrick.catalog), np.arange(1, run_n_blobs+1), fblobs)
else:
result = pool.uimap(partial(runblob, modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT), np.arange(1, run_n_blobs+1), fblobs)
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
if rao_cramer_only:
output_rows = [runblob_rc(kblob_id, fbrick.make_blob(kblob_id), catalog=fbrick.catalog) for kblob_id in np.arange(1, run_n_blobs+1)]
else:
output_rows = [runblob(kblob_id, fbrick.make_blob(kblob_id), modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT) for kblob_id in np.arange(1, run_n_blobs+1)]
logger.info(f'Completed {run_n_blobs} blobs in {time.time() - tstart:3.3f}s')
#output_rows = [x for x in output_rows if x is not None]
output_cat = vstack(output_rows) # HACK -- at some point this should just UPDATE the bcatalog with the new photoms. IF the user sets NBLOBS > 0, the catalog is truncated!
uniq_src, idx_src = np.unique(output_cat['source_id'], return_index=True)
# if len(idx_src) != len(fbrick.catalog):
# raise RuntimeError(f'Output catalog is truncated! {len(idx_src)} out of {len(fbrick.catalog)}')
if len(uniq_src) < len(output_cat):
logger.warning(f'Found {len(uniq_src)} unique sources, out of {len(output_cat)} -- CLEANING!')
output_cat = output_cat[idx_src]
else:
logger.debug(f'Found {len(uniq_src)} unique sources, out of {len(output_cat)}')
# Estimate covariance
fbrick.bcatalog = output_cat
astart = time.time()
logger.info(f'Starting covariance estimation...')
status = fbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f}s)')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = dict(zip(fband, np.zeros(len(fband))))
for b, bname in enumerate(fband):
eff_area[bname] = fbrick.estimate_effective_area(output_cat, bname, modeling=False)[0]
else:
eff_area = None
if not conf.OUTPUT:
logging.warning('OUTPUT is DISABLED! Quitting...')
else:
if insert & conf.OVERWRITE & (conf.NBLOBS==0) & (not force_unfixed_pos):
# open old cat
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
# make fillers
for colname in np.array(output_cat.colnames)[newcols]:
if colname not in mastercat.colnames:
if np.ndim(output_cat[colname]) > 1:
colshape = np.shape(output_cat[colname][0])
else:
colshape = 1
mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
for row in output_cat:
mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]] = row
# coordinate correction
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
# save
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' not in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat'), overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
outcatalog = mastercat
else:
logger.critical(f'Catalog file for brick #{fbrick.brick_id} could not be found!')
return
elif (not insert) & force_unfixed_pos:
# make a new MULTIBAND catalog or add to it!
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{conf.MULTIBAND_NICKNAME}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
if np.sum(newcols) == 0:
logger.warning('Columns exist in catalog -- defaulting to separate file output!')
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{conf.MULTIBAND_NICKNAME}.cat'), overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new catalog file.')
else:
join_cat = output_cat[list(np.array(output_cat.colnames)[newcols])]
join_cat.add_column(output_cat['source_id'])
mastercat = join(mastercat, join_cat, keys='source_id', join_type='left')
# # add new columns, filled.
# newcolnames = []
# for colname in np.array(output_cat.colnames)[newcols]:
# if colname not in mastercat.colnames:
# if colname.startswith('FLUX_APER') | colname.startswith('MAG_APER'):
# mastercat.add_column(Column(length=len(mastercat), dtype=float, shape=(len(conf.APER_PHOT),), name=colname))
# else:
# mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=(1,), name=colname))
# newcolnames.append(colname)
# # if colname.startswith('FLUX_APER') | colname.startswith('MAG_APER'):
# # mastercat.add_column(Column(length=len(mastercat), dtype=float, shape=(len(conf.APER_PHOT),), name=colname))
# # else:
# # mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=(1,), name=colname))
# # [print(j) for j in mastercat.colnames]
# # [print(j) for j in output_cat.colnames]
# # count = 0
# # for row in output_cat:
# # idx = np.where(mastercat['source_id'] == row['source_id'])[0]
# for colname in newcolnames:
# mastercat[colname][idx] = output_cat[colname]
# # print(mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]][newcolnames])
# # print(newcolnames)
# # print(row[newcolnames])
# # print(np.where(mastercat['source_id'] == row['source_id'])[0])
# mastercat[newcolnames][idx] = row[newcolnames]
# count+=1
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' not in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(path_mastercat, overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
else:
mastercat = output_cat
hdr = header_from_dict(conf.__dict__)
# hdr = fits.open(path_mastercat)['CONFIG'].header
# lastb = 0
# for b in np.arange(99):
# if 'AREA{b}' not in hdr.keys():
lastb = 0
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(path_mastercat, overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new catalog file.')
outcatalog = mastercat
else:
for colname in output_cat.colnames:
if colname not in fbrick.catalog.colnames:
colshape = np.shape(output_cat[colname])
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from os import scandir, path  # scandir introduced in py3.5
except ImportError:
from os import path
from scandir import scandir  # PyPI backport for older interpreters
from os import system, chdir
from copy import deepcopy
from scipy.interpolate import splrep, splev
from scipy.signal import savgol_filter, medfilt
from scipy.optimize import minimize
from astropy.io import fits
from common_helper_functions import _order_exposures_by_key, _valid_orders_from_keys, correct_wvl_for_rv, _combine_orders, _spectra_resample
from rv_helper_functions import get_RV_ref_spectrum, get_RV_custom_corr_perorder, get_RV_custom_corr_combined, add_rv_to_metadata
norm_suffix = '_normalised.txt'
sigma_norm_suffix = '_sigma_normalised.txt'
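# Per-order products of the reduction are expected to be named
# '<exposure>_<order>' + suffix, e.g. 'exp1_12_normalised.txt' (hypothetical name).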
def _go_to_dir(dir_path):
# create the directory if it does not exist yet, then move into it
try:
system('mkdir ' + dir_path)
except OSError:
pass
chdir(dir_path)
def _get_reduced_exposures(in_dir):
"""
Return the names of all exposure subdirectories in a reduction directory.
:param in_dir: path to the directory holding per-exposure subdirectories
:return: list of exposure (subdirectory) names
"""
return [f.name for f in scandir(in_dir) if f.is_dir()]
def _get_normalised_orders(spec_dir, spectrum):
"""
List the normalised orders that exist on disk for a given spectrum.
:param spec_dir: directory holding the reduced spectra
:param spectrum: exposure/spectrum name
:return: list of '<spectrum>_<order>' identifiers whose flux files exist
"""
existing_orders = []
for i_o in range(35):
spec_path = spec_dir + spectrum + '/' + spectrum + '_' + str(i_o)
if path.isfile(spec_path + norm_suffix):
existing_orders.append(spectrum + '_' + str(i_o))
return existing_orders
def get_orderdata_by_wavelength(spec_dir, orders, in_wvl):
"""
Return the data of the first order whose wavelength range covers in_wvl.
:param spec_dir: directory holding the per-order data files
:param orders: list of order identifiers to search
:param in_wvl: requested wavelength (Angstroms)
:return: (wvl, flx, sig) arrays of the matching order, or (None, None, None)
"""
for order in orders:
flux_data = np.loadtxt(spec_dir + order + norm_suffix)
#sigma_data = np.loadtxt(spec_dir + order + sigma_norm_suffix)
sigma_data = deepcopy(flux_data)
sigma_data[:, 1] = 0.
if np.nanmin(flux_data[:, 0]) <= in_wvl <= np.nanmax(flux_data[:, 0]):
return flux_data[:, 0], flux_data[:, 1], sigma_data[:, 1]
return None, None, None
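# Usage sketch (hypothetical directory; 6563 A lies in the H-alpha order):
#   wvl, flx, sig = get_orderdata_by_wavelength('spec/exp1/', orders, 6563.)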
def get_spectral_data(star, wvl_orders, in_dir,
new_only=False):
"""
Read all reduced exposures of a star and collect the requested orders.
:param star: star (directory) name
:param wvl_orders: wavelengths whose covering orders should be collected
:param in_dir: root directory of the reduced data
:param new_only: if True, keep only the newer 'ec.vh' exposures
:return: dictionary of per-exposure order data and helio velocities
"""
input_dir = in_dir + star + '/spec/'
list_exposures = _get_reduced_exposures(input_dir)
# create a dictionary of all exposures with their belonging data
star_data_all = {}
for exposure in list_exposures:
if 'joined' in exposure:
# skip exposures/spectra that are created by joining multiple exposures
continue
if new_only:
if 'ec.vh' not in exposure:
# skip older exposures that might be of worse quality
continue
# get all possible orders
print('Exploring orders of exposure:', exposure)
all_norm_orders = _get_normalised_orders(input_dir, exposure)
if len(all_norm_orders) > 0:
# create new dictionary that will hold the data of selected order for a given exposure
star_data_all[exposure] = {}
# if available read vhelio velocity from the original reduction fits file
vh_key = 'VHELIO'
vhelio = np.nan
# open and read original reduced fits file
orig_fits = fits.open(input_dir + exposure + '.fits')
header_fits = orig_fits[0].header
if vh_key in header_fits.keys():
vhelio = header_fits[vh_key]
orig_fits.close()
# add vhelio velocity to the data structure
star_data_all[exposure][vh_key] = vhelio
# read data of individual orders and save them into the structure
for get_wvl_order in wvl_orders:
order_data = get_orderdata_by_wavelength(input_dir + exposure + '/',
all_norm_orders, get_wvl_order)
if order_data[0] is not None:
star_data_all[exposure][get_wvl_order] = {
'wvl': order_data[0],
'flx': order_data[1],
'sig': order_data[2],
'flx1': np.ones_like(order_data[1]),
'flx2': np.zeros_like(order_data[1]),
}
# add RV flags, which determines which of the orders can be used for RV estimation
#star_data_all['RV_s1_use'] =
#star_data_all['RV_s2_use'] =
return star_data_all
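# Usage sketch (hypothetical star name, order wavelengths, and data root):
#   star_data = get_spectral_data('star1', [5500, 6563], '/data/reduced/')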
def create_new_reference(exposures_all, target_wvl,
percentile=None, w_filt=None,
use_flx_key='flx', use_rv_key='RV_s1',
plot_combined=False, plot_path='plot_combined.png',
plot_shifted=False):
"""
Median-combine all RV-corrected exposures into a new reference spectrum.
:param exposures_all: dictionary of per-exposure data structures
:param target_wvl: common wavelength grid to resample onto
:param percentile: optional percentile level used when combining
:param w_filt: optional window width of the median filter shown on the plot
:param use_flx_key: key of the flux array to combine
:param use_rv_key: key of the RV value used to shift each exposure
:param plot_combined: plot all exposures together with the combined spectrum
:param plot_path: output path for the combined-spectrum plot
:param plot_shifted: plot exposures offset vertically for visual inspection
:return: median combination of the RV-corrected exposures
"""
flx_new = list([])
all_exp_ids = _order_exposures_by_key(exposures_all, exposures_all.keys(),
sort_key=use_rv_key)
for exposure_id in all_exp_ids:
exposure_data = exposures_all[exposure_id]
# combine all resampled and RV moved spectra
exposure_new_flx = _combine_orders(exposure_data, target_wvl,
use_flx_key=use_flx_key, use_rv_key=use_rv_key)
flx_new.append(exposure_new_flx)
# compute median of all considered exposures
flx_new = np.array(flx_new)
flx_new_median = np.nanmedian(flx_new, axis=0)
flx_new_std = np.nanstd(flx_new, axis=0)
idx_median = np.isfinite(flx_new_median)
wvl_range = (np.min(target_wvl[idx_median]) - 2.,
np.max(target_wvl[idx_median]) + 2.)
n_spectra = flx_new.shape[0]
x_ticks = range(4500, 7000, 20)
x_ticks_str = [str(xt) for xt in x_ticks]
# plot combined spectra - all around normalized level of 1
if plot_combined:
fig, ax = plt.subplots(2, 1, figsize=(135, 6.), sharex=True)
# plot individual spectra and final combined spectrum
for i_ex in range(n_spectra):
ax[0].plot(target_wvl, flx_new[i_ex, :], lw=0.5, alpha=0.33)
ax[0].plot(target_wvl, flx_new_median, c='black', lw=0.8)
if w_filt is not None:
ax[0].plot(target_wvl, medfilt(flx_new_median, w_filt), c='green', lw=0.5)
ax[0].set(xlim=wvl_range, ylim=np.nanpercentile(flx_new_median, [0.4, 99.6]),
# xlabel='Wavelength [A]',
# xticks=x_ticks, xticklabels=x_ticks_str,
ylabel='Normalized flux')
# plot deviations from the reference spectrum - could be used for RV bad wavelength masking
flx_new_std = np.nanstd(flx_new - flx_new_median, axis=0)
for i_ex in range(n_spectra):
ax[1].plot(target_wvl, flx_new[i_ex, :] - flx_new_median, lw=0.5, alpha=0.33)
ax[1].set(xlim=wvl_range, ylim=[-0.04, 0.04],
# xticks=x_ticks, xticklabels=x_ticks_str,
xlabel='Wavelength [A]', ylabel='Flux diff')
ax[1].plot(target_wvl, flx_new_std, c='black', lw=0.8)
ax[1].plot(target_wvl, -flx_new_std, c='black', lw=0.8)
# final plot visual corrections
ax[0].grid(ls='--', alpha=0.2, color='black')
ax[1].grid(ls='--', alpha=0.2, color='black')
fig.tight_layout()
fig.subplots_adjust(hspace=0, wspace=0)
fig.savefig(plot_path, dpi=150)
plt.close(fig)
# plot combined and shifted spectra - every spectrum shifted for a certain flux offset level
if plot_shifted:
# compute function to be plotted as deviations around median flux value
fill_1 = np.nanpercentile(flx_new, 15, axis=0)
fill_2 = np.nanpercentile(flx_new, 85, axis=0)
idx_fill = np.logical_and(np.isfinite(fill_1), np.isfinite(fill_2))
# start plotting
y_range = np.nanpercentile(flx_new_median, [0.4, 99.6])
        flx_offset = 0.75 * (y_range[1] - y_range[0])  # three quarters of the expected y range
fig, ax = plt.subplots(1, 1, figsize=(90, 3. + 0.8 * n_spectra))
for i_ex in range(n_spectra):
ax.plot(target_wvl, flx_new[i_ex, :] + (flx_offset * (i_ex + 1)), lw=0.6, alpha=0.8)
            ax.text(wvl_range[0]+5, 1 + (flx_offset * (i_ex + 1)), all_exp_ids[i_ex].split('.')[0],
fontsize=10, va='center')
# ax.fill_between(target_wvl, fill_1, fill_2,
# color='lightgrey', where=idx_fill)
ax.fill_between(target_wvl, flx_new_median-flx_new_std, flx_new_median+flx_new_std,
color='lightgrey', where=idx_fill)
ax.plot(target_wvl, flx_new_median, c='black', lw=0.8)
ax.set(xlim=wvl_range,
ylim=y_range + np.array([0, flx_offset * n_spectra]),
# xticks=x_ticks, xticklabels=x_ticks_str,
xlabel='Wavelength [A]', ylabel='Normalized and shifted flux')
ax.grid(ls='--', alpha=0.2, color='black')
fig.tight_layout()
fig.savefig(plot_path[:-4] + '_shifted.png', dpi=150)
plt.close(fig)
# return rv corrected and computed median combination of individual exposures
if percentile is None:
flx_final = flx_new_median # / np.nanpercentile(flx_new_median, 80)
else:
flx_new_perc = np.nanpercentile(flx_new, percentile, axis=0)
flx_final = flx_new_perc # / np.nanpercentile(flx_new_median, 80)
# apply median filtering if requested
if w_filt is not None:
flx_final = medfilt(flx_final, w_filt)
# return new median combined spectrum
return flx_final, flx_new_std
def _evaluate_norm_fit(orig, fit, idx, sigma_low, sigma_high):
"""
:param orig:
:param fit:
:param idx:
:param sigma_low:
:param sigma_high:
:return:
"""
    # difference from the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
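# A minimal usage sketch of the clipping helper above; the synthetic spectrum,
# seed and thresholds are illustrative assumptions, not pipeline data.
def _demo_evaluate_norm_fit():
    rng = np.random.default_rng(0)
    flux = 1. + rng.normal(0., 0.01, 100)
    flux[40:45] -= 0.3  # deep absorption feature that should be clipped
    idx = np.ones(100, dtype=bool)
    idx = _evaluate_norm_fit(flux, np.ones(100), idx, 2., 2.)
    assert not idx[42]  # the line core is excluded from the continuum fit mask
    return idx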
def _spectra_normalize(wvl, spectra_orig,
steps=5, sigma_low=2., sigma_high=2.5, window=15, order=5, n_min_perc=5.,
func='cheb', fit_on_idx=None, fit_mask=None, sg_filter=False,
return_fit=False, return_idx=False, median_init=True):
"""
:param wvl:
:param spectra_orig:
:param steps:
:param sigma_low:
:param sigma_high:
:param window:
:param order:
:param n_min_perc:
:param func:
:param fit_on_idx:
:param fit_mask:
:param sg_filter:
:param return_fit:
:param return_idx:
:return:
"""
# perform sigma clipping before the next fitting cycle
idx_fit = np.logical_and(np.isfinite(wvl), np.isfinite(spectra_orig))
spectra = np.array(spectra_orig)
if fit_mask is not None:
idx_fit = np.logical_and(idx_fit, fit_mask)
if fit_on_idx is not None:
idx_fit = np.logical_and(idx_fit, fit_on_idx)
steps = 1 # no clipping performed, one iteration, forced fitting on selected pixels
else:
# filter noisy original spectra, so it is easier to determine continuum levels
if sg_filter:
spectra = savgol_filter(spectra_orig, window_length=15, polyorder=5)
if median_init:
init_fit = np.nanmedian(spectra)
idx_fit = _evaluate_norm_fit(spectra, init_fit, idx_fit, sigma_low*2.5, sigma_high*2.5)
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
for i_f in range(steps): # number of sigma clipping steps
# print i_f
if func == 'cheb':
chb_coef = np.polynomial.chebyshev.chebfit(wvl[idx_fit], spectra[idx_fit], order)
cont_fit = np.polynomial.chebyshev.chebval(wvl, chb_coef)
if func == 'legen':
leg_coef = np.polynomial.legendre.legfit(wvl[idx_fit], spectra[idx_fit], order)
cont_fit = np.polynomial.legendre.legval(wvl, leg_coef)
if func == 'poly':
poly_coef = np.polyfit(wvl[idx_fit], spectra[idx_fit], order)
cont_fit = np.poly1d(poly_coef)(wvl)
if func == 'spline':
# if i_f == 1:
# chb_coef = np.polynomial.chebyshev.chebfit(wvl[idx_fit], spectra[idx_fit], 5)
# cont_fit = np.polynomial.chebyshev.chebval(wvl, chb_coef)
# idx_fit = _evaluate_norm_fit(spectra, cont_fit, idx_fit, sigma_low, sigma_high)
spline_coef = splrep(wvl[idx_fit], spectra[idx_fit], k=order, s=window)
cont_fit = splev(wvl, spline_coef)
# print(i_f, 'points:', n_fit_points_prev, 'knots:', len(spline_coef[0]))
idx_fit = _evaluate_norm_fit(spectra, cont_fit, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
if return_fit:
if return_idx:
return cont_fit, idx_fit
else:
return cont_fit
else:
return spectra_orig / cont_fit
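# A minimal usage sketch of _spectra_normalize under assumed synthetic data:
# a slanted continuum with one absorption line is flattened by the iterative
# sigma-clipped Chebyshev fit (all values below are illustrative assumptions).
def _demo_spectra_normalize():
    wvl = np.linspace(4500., 4600., 500)
    flx = 1. + 0.001 * (wvl - wvl[0])  # slowly varying continuum
    flx[240:260] *= 0.6  # absorption feature, rejected by sigma clipping
    flx_norm = _spectra_normalize(wvl, flx, steps=8, order=3, func='cheb')
    return flx_norm  # approximately 1.0 outside the absorption line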
def renorm_exposure_perorder(exposure_data, ref_flx, ref_wvl,
use_rv_key='RV_s1',
input_flx_key='flx',
output_flx_key='flx_renorm',
plot=False, plot_path=None):
"""
:param exposure_data:
:param ref_flx:
:param ref_wvl:
:param use_rv_key:
:param input_flx_key:
:param output_flx_key:
:param plot:
:param plot_path:
:return:
"""
print(' Input normalization flux key is', input_flx_key, 'and RV key is', use_rv_key)
rv_val_star = exposure_data[use_rv_key]
if not np.isfinite(rv_val_star):
rv_val_star = 0
    # shift reference spectrum from the star's rest frame to the barycentric/observed frame - use reversed RV value
ref_wvl_shifted = correct_wvl_for_rv(ref_wvl, -1.*rv_val_star)
echelle_orders = _valid_orders_from_keys(exposure_data.keys())
    # loop through all available Echelle orders
for echelle_order_key in echelle_orders:
# determine observed data that will be used in the correlation procedure
order_flx = exposure_data[echelle_order_key][input_flx_key]
order_wvl = exposure_data[echelle_order_key]['wvl']
# resample reference spectrum to the observed wavelength pixels
ref_flx_order = _spectra_resample(ref_flx, ref_wvl_shifted, order_wvl)
# perform renormalization using the supplied reference spectrum
# get renormalization curve by comparing reference and observed spectrum
try:
wvl_len = len(order_wvl)
ref_flx_norm_curve = _spectra_normalize(np.arange(wvl_len), order_flx / ref_flx_order,
steps=10, sigma_low=2.5, sigma_high=2.5, n_min_perc=8.,
order=4, func='cheb', return_fit=True)
# renorm order
exposure_data[echelle_order_key][output_flx_key] = order_flx / ref_flx_norm_curve
if plot:
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(15, 5))
ax[0].plot(order_wvl, order_flx, lw=0.5, label='Original')
ax[0].plot(order_wvl, ref_flx_order, lw=0.5, label='Reference')
ax[0].plot(order_wvl, order_flx / ref_flx_norm_curve, lw=0.5, label='Renormed')
ax[1].plot(order_wvl, order_flx / ref_flx_order, lw=0.5)
ax[1].plot(order_wvl, ref_flx_norm_curve, lw=0.5)
ax[1].set(xlim=[order_wvl[0]-0.2, order_wvl[-1]+0.2])
ax[0].legend()
fig.tight_layout()
fig.subplots_adjust(hspace=0, wspace=0)
if plot_path is None:
fig.show()
else:
fig.savefig(plot_path[:-4] + '_' + str(echelle_order_key) + '.png', dpi=150)
plt.close(fig)
except Exception as e:
print(' Renormalization problem for:', echelle_order_key, e)
exposure_data[echelle_order_key][output_flx_key] = order_flx
# return original data with addition of a renormed spectrum
return exposure_data
def _flx_amp(flx, amp, cont=1.):
"""
:param flx:
:param amp:
:return:
"""
return cont - amp * (cont - flx)
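# A minimal sketch of what _flx_amp does, with assumed toy values: it rescales
# feature depths around the continuum level, so amp=1 returns the input and
# amp=0 collapses the spectrum onto the continuum.
def _demo_flx_amp():
    flx = np.array([1.0, 0.7, 1.0])
    assert np.allclose(_flx_amp(flx, 1.0), flx)
    assert np.allclose(_flx_amp(flx, 0.0), np.ones(3))
    assert np.allclose(_flx_amp(flx, 0.5), [1.0, 0.85, 1.0])  # line depth halved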
def remove_ref_from_exposure(exposure_data, ref_flx, ref_wvl,
primary=True,
use_rv_key='RV_s1',
input_flx_key='flx',
fit_before_removal=False,
output_flx_key='flx_secon',
ref_orig=None, w_filt=None,
plot=False, plot_path='plot.png',
verbose=True):
"""
:param exposure_data:
:param ref_flx:
:param ref_wvl:
:param primary:
:param use_rv_key:
:param input_flx_key:
:param fit_before_removal:
:param output_flx_key:
:param ref_orig:
:param w_filt:
:param plot:
:param plot_path:
:param verbose:
:return:
"""
if use_rv_key not in exposure_data.keys():
if verbose:
print(' WARNING: Given RV key (' + use_rv_key + ') not found -> RV = 0. will be used.')
rv_val_star = 0.
else:
rv_val_star = exposure_data[use_rv_key]
if not np.isfinite(rv_val_star):
if verbose:
print(' WARNING: Component removal not possible as RV was not estimated.')
return exposure_data
    # shift reference spectrum from the star's rest frame to the barycentric/observed frame - use reversed RV value
ref_wvl_shifted = correct_wvl_for_rv(ref_wvl, -1. * rv_val_star)
echelle_orders = _valid_orders_from_keys(exposure_data.keys())
    # loop through all available Echelle orders
for echelle_order_key in echelle_orders:
# determine observed data that will be used in the primary removal procedure
order_flx = exposure_data[echelle_order_key][input_flx_key]
order_wvl = exposure_data[echelle_order_key]['wvl']
# resample reference spectrum to the observed wavelength pixels
ref_flx_order = _spectra_resample(ref_flx, ref_wvl_shifted, order_wvl)
# adjust/force reference flux to have the same amplitude as observed spectrum
# useful for stars with lower snr and/or reduction problems
if fit_before_removal:
# helper function used in the minimization process
def min_flx_dif_prim(amp):
# manhattan spectral distance between two spectra
return np.sum(np.abs((order_flx - 1.) - _flx_amp(ref_flx_order, amp, cont=0.)))
def min_flx_dif_sec(amp):
# manhattan spectral distance between two spectra
return np.sum(np.abs(order_flx - _flx_amp(ref_flx_order, amp, cont=1.)))
# minimize difference between observed and reference spectrum
if primary:
min_res = minimize(min_flx_dif_prim, [1.], bounds=[(0., 2.)])
else:
min_res = minimize(min_flx_dif_sec, [1.], bounds=[(0., 2.)])
# get the best amplitude correction factor
amp_use = min_res['x'][0]
if verbose:
print(' Flx amp modification (order - ' + str(echelle_order_key) + '): {:.3f}'.format(amp_use))
# correct flux for determined amplitude
if primary:
ref_flx_order = _flx_amp(ref_flx_order, amp_use, cont=0.)
else:
ref_flx_order = _flx_amp(ref_flx_order, amp_use, cont=1.)
        # remove the contribution of the reference spectrum by a simple spectral subtraction
order_flx_diff = order_flx - ref_flx_order
# order_flx_diff = order_flx / ref_flx_order
if w_filt is not None:
exposure_data[echelle_order_key][output_flx_key] = medfilt(order_flx_diff, w_filt)
else:
exposure_data[echelle_order_key][output_flx_key] = order_flx_diff
if plot:
flx_orig_comb = _combine_orders(exposure_data, ref_wvl_shifted,
use_flx_key=input_flx_key, use_rv_key=None)
flx_seco_comb = _combine_orders(exposure_data, ref_wvl_shifted,
use_flx_key=output_flx_key, use_rv_key=None)
y_range = np.nanpercentile(flx_orig_comb, [0.4, 99.6])
flx_offset = 0.75 * (y_range[1] - y_range[0])
wvl_range = (np.min(ref_wvl_shifted[np.isfinite(flx_orig_comb)]) - 2.,
np.max(ref_wvl_shifted[np.isfinite(flx_orig_comb)]) + 2.)
x_ticks = range(4500, 7000, 20)
x_ticks_str = [str(xt) for xt in x_ticks]
fig, ax = plt.subplots(1, 1, figsize=(120, 5.))
if primary:
ax.plot(ref_wvl_shifted, flx_orig_comb, c='C3', lw=0.7, alpha=0.8)
ax.plot(ref_wvl_shifted, 1. + ref_flx, c='black', lw=0.5, alpha=0.8)
ax.plot(ref_wvl_shifted, 0.04 + flx_seco_comb, c='C2', lw=0.7, alpha=0.8)
else:
ax.plot(ref_wvl_shifted, flx_orig_comb, c='C3', lw=0.7, alpha=0.8)
ax.plot(ref_wvl_shifted, ref_flx, c='black', lw=0.5, alpha=0.8)
ax.plot(ref_wvl_shifted, 1.04 + flx_seco_comb, c='C2', lw=0.7, alpha=0.8)
ax.axhline(1.04, c='black', ls='--', lw=0.5, alpha=0.9)
if ref_orig is not None:
ax.plot(ref_wvl_shifted, ref_orig - flx_offset, c='red', lw=0.8)
y_range[0] -= flx_offset
ax.set(xlim=wvl_range,
ylim=[y_range[0], 1.05],
xlabel='Wavelength [A]', ylabel='Normalized and median removed flux',
xticks=x_ticks, xticklabels=x_ticks_str)
ax.grid(ls='--', alpha=0.2, color='black')
fig.tight_layout()
fig.savefig(plot_path, dpi=150)
plt.close(fig)
# return original data with addition of a reference corrected per order spectrum
return exposure_data
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# ------------ Function that runs the whole procedure at once --------------------
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
def _are_orders_renormed(exposures_data, input_key):
"""
:param exposures_data:
:param input_key:
:return:
"""
exposures_all = list(exposures_data.keys())
n_renorm = 0
for exp_id in exposures_all:
orders_all = _valid_orders_from_keys(exposures_data[exp_id].keys())
n_orders = 0
for ord_id in orders_all:
if input_key+'_renorm' in list(exposures_data[exp_id][ord_id].keys()):
n_orders += 1
if n_orders == len(orders_all):
n_renorm += 1
# return True if all orders in all exposures have renormed flux data
return n_renorm == len(exposures_all)
def run_complete_RV_and_template_discovery_procedure(star_data, obs_metadata, # datasets and tables
ref_flx, ref_wvl, # spectral reference data
star_id='', in_flx_key='flx', rv_key='RV_s1', # exact data that will be used
                                                     primary=True,  # are we processing the most obvious spectral component
combined_rv_spectrum=False, # processing settings
save_plots=True, plot_prefix='', plot_suffix='', # plotting settings
                                                     verbose=True,  # screen verbosity setting
):
"""
:param star_data:
:param obs_metadata:
:param ref_flx:
:param ref_wvl:
:param star_id:
:param in_flx_key:
:param rv_key:
:param primary:
:param combined_rv_spectrum:
:param save_plots:
:param plot_prefix:
:param plot_suffix:
:param verbose:
:return:
"""
# some component specific processing and output settings
if primary:
c_id = 1
cont_value = 1.
else:
c_id = 2
cont_value = 0.
# set flux dataset that will be used in the processing
use_flx_key = deepcopy(in_flx_key)
if verbose:
print(' Spectra used for RV determination:', use_flx_key)
# get per order RV velocities for every exposure
for exp_id in star_data.keys():
if verbose:
print(' Exposure:', exp_id)
if combined_rv_spectrum:
# compute RV from a combined spectrum (stack of individual echelle orders)
rv_png = plot_prefix + '_' + exp_id + '_rv' + str(c_id) + '-combined' + plot_suffix + '.png'
rv_med, rv_std = get_RV_custom_corr_combined(deepcopy(star_data[exp_id]), ref_flx, ref_wvl,
cont_value=cont_value,
rv_ref_val=None, use_flx_key=use_flx_key,
plot_rv=True, plot_path=rv_png)
if verbose:
print(' Combined RV value:', rv_med, rv_std)
else:
# compute mean RV from all considered orders
rv_png = plot_prefix + '_' + exp_id + '_rv' + str(c_id) + '-orders' + plot_suffix + '.png'
rv_all, rv_med, rv_std = get_RV_custom_corr_perorder(deepcopy(star_data[exp_id]), ref_flx, ref_wvl,
cont_value=cont_value,
rv_ref_val=None, use_flx_key=use_flx_key,
plot_rv=False, plot_path=rv_png)
if verbose:
print(' Median RV value:', rv_med, rv_std)
star_data[exp_id][rv_key + '_orders'] = rv_all
# store values to the dictionary
star_data[exp_id][rv_key] = rv_med
star_data[exp_id]['e_' + rv_key] = rv_std
# compute median spectrum of a secondary star and use it as a new and updated RV template
use_flx_key_median = deepcopy(in_flx_key)
if verbose:
print(' Creating median reference spectrum')
combined_png = plot_prefix + '_s' + str(c_id) + '_combined' + plot_suffix + '.png'
    # get new reference spectrum as median of all aligned spectra, per wvl pixel std is also computed and returned
ref_flx_new, _ = create_new_reference(star_data, ref_wvl,
# percentile=85.,
w_filt=3,
use_flx_key=use_flx_key_median, use_rv_key=rv_key,
plot_combined=True, plot_shifted=save_plots,
plot_path=combined_png)
# Add RV values of a binary star to the observations metadata table and plot phase RV diagram
rv_phase_plot_png = plot_prefix + '_RV' + str(c_id) + plot_suffix + '.png'
obs_metadata = add_rv_to_metadata(star_data, star_id,
deepcopy(obs_metadata), rv_key,
# always save this plot as it is the final result of the binary spectral processing
plot=True, plot_path=rv_phase_plot_png)
# finally return all important structures that hold gathered information and spectra
return star_data, obs_metadata, ref_flx_new
def show_spectra_heliocentric(star_data, order,
tellurics_data=None, prefix=''):
"""
:param star_data:
:param order:
:param tellurics_data:
:param prefix:
:return:
"""
fig, ax = plt.subplots(1, 1, figsize=(85, 5))
w_min = 10000
w_max = 0
for exp_is in star_data.keys():
exposure_data = star_data[exp_is]
if order not in exposure_data.keys():
continue
y_flx = exposure_data[order]['flx']
x_wvl = exposure_data[order]['wvl']
x_wvl = correct_wvl_for_rv(x_wvl, exposure_data['VHELIO']) # + or - VHELIO??
w_min = min(w_min, np.nanmin(x_wvl))
w_max = max(w_max, np.nanmax(x_wvl))
ax.plot(x_wvl, y_flx, lw=0.5, alpha=0.6)
# add telluric reference spectrum to the combined plot
if tellurics_data is not None:
ax.plot(tellurics_data[:, 0], tellurics_data[:, 1], lw=0.7, alpha=0.75, c='black')
# additional plotting settings
ax.set(ylim=(0.8, 1.05), xlim=(w_min, w_max))
ax.grid(ls='--', alpha=0.2, color='black')
fig.tight_layout()
fig.savefig(prefix + 'spec_helio_'+str(order)+'.png', dpi=150)
plt.close(fig)
return True
def plot_combined_spectrum_using_RV(exposure_data,
ref_flx_s1, ref_flx_s2, ref_wvl,
prim_rv='RV_s1', sec_rv='RV_s2', input_flx_key='flx',
plot=True, plot_path='plot.png'):
"""
:param exposure_data:
:param ref_flx_s1:
:param ref_flx_s2:
:param ref_wvl:
:param prim_rv:
:param sec_rv:
:param input_flx_key:
:param plot:
:param plot_path:
:return:
"""
    # loop through all available orders
flx_orig_comb = _combine_orders(exposure_data, ref_wvl,
use_flx_key=input_flx_key, use_rv_key=None)
# shift reference spectra into observed stars frame
rv_s1 = exposure_data[prim_rv]
rv_s2 = exposure_data[sec_rv]
if not np.isfinite(rv_s1):
rv_s1 = 0
    if not np.isfinite(rv_s2):
        rv_s2 = 0
import tensorflow as tf
import numpy as np
from copy import copy
from .base import Model
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class KerasTFModel(Model):
def __init__(self, model, x_dim: int, u_dim: int, p_dim=0, tvp_dim=0, standardScaler=None):
if not standardScaler is None:
raise NotImplementedError("This feature isn't supported yet !")
#if not isinstance(model, (tf.keras.Model)):
# raise ValueError("The provided model isn't a Keras Model object !")
if len(model.input_shape) != 2:
raise NotImplementedError("Recurrent neural network are not supported atm.")
if model.output_shape[-1] != x_dim:
raise ValueError("Your Keras model do not provide a suitable output dim ! \n It must get the same dim as the state dim.")
if model.input_shape[-1] != sum((x_dim, u_dim, p_dim, tvp_dim)):
raise ValueError("Your Keras model do not provide a suitable input dim ! \n It must get the same dim as the sum of all input vars (x, u, p, tvp).")
super(KerasTFModel, self).__init__(x_dim, u_dim, p_dim, tvp_dim)
self.model = model
self.test = None
def __getstate__(self):
result_dict = copy(self.__dict__)
result_dict["model"] = None
return result_dict
def __setstate__(self, d):
self.__dict__ = d
def _gather_input(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
output_np = np.concatenate([x, u], axis=1)
if not tvp is None:
output_np = np.concatenate([output_np, tvp], axis=1)
if not p is None:
#TODO check this part
output_np = np.concatenate([output_np, np.array([[p,]*x.shape[0]])], axis=1)
return output_np
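    # Illustrative note on the column layout built by _gather_input (toy shapes
    # assumed, not part of the model API): with x of shape (2, 3), u of shape
    # (2, 1) and tvp of shape (2, 2), each returned row reads
    #   [x_0 x_1 x_2 | u_0 | tvp_0 tvp_1]
    # i.e. states first, then controls, then time-varying parameters (and finally
    # p when given), giving shape (2, 6) here.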
def forward(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_net = self._gather_input(x, u, p=p, tvp=tvp)
return self.model.predict(input_net)
def jacobian(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_net = self._gather_input(x, u, p=p, tvp=tvp)
input_tf = tf.constant(input_net)
with tf.GradientTape(persistent=False) as tx:
tx.watch(input_tf)
output_tf = self.model(input_tf)
jacobian_tf = tx.jacobian(output_tf, input_tf)
        # keep only the x and u columns (slicing with [..., :-0] would return an
        # empty array when p_dim and tvp_dim are both zero)
        jacobian_np = jacobian_tf.numpy()[:, :, :, :self.x_dim + self.u_dim]
        jacobian_np = jacobian_np.reshape(x.shape[0]*self.x_dim, (self.x_dim+self.u_dim)*x.shape[0])
reshape_indexer = sum([ list(np.arange(x.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(x.shape[0]) ], list()) + \
sum([ list( x.shape[1]+np.arange(u.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(x.shape[0]) ], list())
jacobian_np = np.take(jacobian_np, reshape_indexer, axis=1)
return jacobian_np
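    # Illustrative note on reshape_indexer above (assumed toy dims): with x_dim=2,
    # u_dim=1 and a horizon of 2 rows, the flattened columns are ordered
    # [x00 x01 u0 | x10 x11 u1]; the two sums evaluate to [0, 1, 3, 4] and [2, 5],
    # so np.take(..., [0, 1, 3, 4, 2, 5], axis=1) regroups the Jacobian columns
    # into a state block followed by a control block.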
@tf.function
def _hessian_compute(self, input_tf):
hessian_mask = tf.reshape(tf.eye(tf.shape(input_tf)[0]*self.model.output_shape[-1],tf.shape(input_tf)[0]*self.model.output_shape[-1]), (tf.shape(input_tf)[0]*self.model.output_shape[-1],tf.shape(input_tf)[0],self.model.output_shape[-1]))
        hessian_mask = tf.cast(hessian_mask, tf.float32)
        output_tf = self.model(input_tf)
        output_tf = tf.cast(output_tf, tf.float32)
result = tf.map_fn(lambda mask : tf.hessians(output_tf*mask, input_tf)[0] , hessian_mask, dtype=tf.float32)
return result
def hessian(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_np = self._gather_input(x, u, p=p, tvp=tvp)
input_tf = tf.constant(input_np, dtype=tf.float32)
#if self.test is None:
# self.test = self._hessian_compute.get_concrete_function(input_tf=tf.TensorSpec([input_tf.shape[0], input_tf.shape[1]], tf.float64), output_shape=int(self.model.output_shape[-1]))
#hessian_np = self.test(input_tf, int(self.model.output_shape[-1])).numpy()
hessian_np = self._hessian_compute(input_tf).numpy()
        # keep only the x and u columns on both derivative axes (slicing with [:-0]
        # would return an empty array when p_dim and tvp_dim are both zero)
        n_keep = input_np.shape[1] - self.p_dim - self.tvp_dim
        hessian_np = hessian_np[:, :, :n_keep, :, :n_keep]
        # TODO a better implementation could split the input BEFORE computing the hessian!
        hessian_np = hessian_np.reshape(x.shape[0], x.shape[1], n_keep*input_np.shape[0], n_keep*input_np.shape[0])
reshape_indexer = sum([ list(np.arange(x.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(x.shape[0]) ], list()) + \
sum([ list( x.shape[1]+np.arange(u.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(x.shape[0]) ], list())
hessian_np = np.take(hessian_np, reshape_indexer, axis=2)
hessian_np = np.take(hessian_np, reshape_indexer, axis=3)
return hessian_np
@tf.function
def rolling_input(input_tf, x_dim, u_dim, rolling_window=2, H=2, forward=True):
# TODO do not take into account p and tvp
x = input_tf[:,0:x_dim]
u = input_tf[:,x_dim:x_dim+u_dim]
if forward:
x_rolling = tf.stack([ tf.reshape(x[i:i+rolling_window, :],(-1,)) for i in range(H)], axis=0)
u_rolling = tf.stack([ tf.reshape(u[i:i+rolling_window, :],(-1,)) for i in range(H)], axis=0)
else:
x_rolling = tf.stack([ tf.reshape( tf.reverse( x[i:i+rolling_window, :] , [0]),(-1,)) for i in range(H)], axis=0)
u_rolling = tf.stack([ tf.reshape( tf.reverse( u[i:i+rolling_window, :] , [0]),(-1,)) for i in range(H)], axis=0)
return tf.concat([x_rolling,u_rolling],axis=1)
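# A minimal sketch of rolling_input with assumed toy shapes: for x_dim=1, u_dim=1
# and the defaults rolling_window=2, H=2, forward rolling stacks consecutive rows:
#   rolling_input(tf.constant([[0., 10.], [1., 11.], [2., 12.]]), 1, 1)
#   -> [[0., 1., 10., 11.],
#       [1., 2., 11., 12.]]   # each row is [x_t, x_t+1, u_t, u_t+1]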
class KerasTFModelRollingInput(Model):
def __init__(self, model, x_dim: int, u_dim: int, p_dim=0, tvp_dim=0, rolling_window=2, forward_rolling=True, standardScaler=None):
if not standardScaler is None:
raise NotImplementedError("This feature isn't supported yet !")
# TODO make checking according to rolling_window
#if not isinstance(model, (tf.keras.Model)):
# raise ValueError("The provided model isn't a Keras Model object !")
#if len(model.input_shape) != 2:
# raise NotImplementedError("Recurrent neural network are not supported atm.")
if model.output_shape[-1] != x_dim:
raise ValueError("Your Keras model do not provide a suitable output dim ! \n It must get the same dim as the state dim.")
#if model.input_shape[-1] != sum((x_dim, u_dim, p_dim, tvp_dim)):
# raise ValueError("Your Keras model do not provide a suitable input dim ! \n It must get the same dim as the sum of all input vars (x, u, p, tvp).")
if not isinstance(rolling_window, int) or rolling_window<1:
raise ValueError("Your rolling windows need to be an integer gretter than 1.")
super(KerasTFModelRollingInput, self).__init__(x_dim, u_dim, p_dim, tvp_dim)
self.model = model
self.rolling_window = rolling_window
self.forward_rolling = forward_rolling
self.prev_x, self.prev_u, self.prev_tvp = None, None, None
self.jacobian_proj = None
def __getstate__(self):
result_dict = copy(self.__dict__)
result_dict["prev_x"] = None
result_dict["prev_u"] = None
result_dict["model"] = None
result_dict["prev_tvp"] = None
return result_dict
def __setstate__(self, d):
self.__dict__ = d
def set_prev_data(self, x_prev: np.ndarray, u_prev: np.ndarray, tvp_prev=None):
assert x_prev.shape == (self.rolling_window-1, self.x_dim), f"Your x prev tensor must have the following shape {(self.rolling_window-1, self.x_dim)} (received : {x_prev.shape})"
assert u_prev.shape == (self.rolling_window-1, self.u_dim), f"Your u prev tensor must have the following shape {(self.rolling_window-1, self.u_dim)} (received : {u_prev.shape})"
self.prev_x = x_prev
self.prev_u = u_prev
if not tvp_prev is None:
assert tvp_prev.shape == (self.rolling_window-1, self.tvp_dim), f"Your tvp prev tensor must have the following shape {(self.rolling_window-1, self.tvp_dim)} (received : {tvp_prev.shape})"
self.prev_tvp = tvp_prev
def _gather_input(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
        assert (not self.prev_x is None) and (not self.prev_u is None), "You must provide a history window with set_prev_data before calling any inference function."
x_extended = np.concatenate([self.prev_x, x], axis=0)
u_extended = np.concatenate([self.prev_u, u], axis=0)
output_np = np.concatenate([x_extended, u_extended], axis=1)
if not tvp is None:
tvp_extended = np.concatenate([self.prev_tvp, tvp], axis=0)
output_np = np.concatenate([output_np, tvp_extended], axis=1)
if not p is None:
output_np = np.concatenate([output_np, np.array([[p,]*x.shape[0]])], axis=1)
return output_np
def _gather_input_V2(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
        assert (not self.prev_x is None) and (not self.prev_u is None), "You must provide a history window with set_prev_data before calling any inference function."
x_extended = np.concatenate([self.prev_x, x], axis=0)
u_extended = np.concatenate([self.prev_u, u], axis=0)
if self.forward_rolling:
x_rolling = np.stack([ x_extended[i:i+self.rolling_window, :].reshape(-1) for i in range(x.shape[0])], axis=0)
u_rolling = np.stack([ u_extended[i:i+self.rolling_window, :].reshape(-1) for i in range(x.shape[0])], axis=0)
if not tvp is None:
                assert (not self.prev_tvp is None), "You must provide a history window with set_prev_data before calling any inference function."
tvp_extended = np.concatenate([self.prev_tvp, tvp], axis=0)
tvp_rolling = np.stack([ tvp_extended[i:i+self.rolling_window, :].reshape(-1) for i in range(x.shape[0])], axis=0)
else:
x_rolling = np.stack([ (x_extended[i:i+self.rolling_window, :])[::-1,:].reshape(-1) for i in range(x.shape[0])], axis=0)
u_rolling = np.stack([ (u_extended[i:i+self.rolling_window, :])[::-1,:].reshape(-1) for i in range(x.shape[0])], axis=0)
if not tvp is None:
                assert (not self.prev_tvp is None), "You must provide a history window with set_prev_data before calling any inference function."
tvp_extended = np.concatenate([self.prev_tvp, tvp], axis=0)
tvp_rolling = np.stack([ (tvp_extended[i:i+self.rolling_window, :])[::-1,:].reshape(-1) for i in range(x.shape[0])], axis=0)
output_np = np.concatenate([x_rolling, u_rolling], axis=1)
if not tvp is None:
output_np = np.concatenate([output_np, tvp_rolling], axis=1)
if not p is None:
output_np = np.concatenate([output_np, np.array([[p,]*x.shape[0]])], axis=1)
return output_np
def forward(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_net = self._gather_input(x, u, p=p, tvp=tvp)
input_net_rolled = rolling_input(input_net, self.x_dim, self.u_dim, rolling_window=self.rolling_window, H=x.shape[0], forward=self.forward_rolling)
res = self.model.predict(input_net_rolled)
if not isinstance(res, np.ndarray):
return res.numpy()
return res
def jacobian(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_net = self._gather_input(x, u, p=p, tvp=tvp)
input_tf = tf.constant(input_net)
if self.jacobian_proj is None:
with tf.GradientTape(persistent=False) as tx:
tx.watch(input_tf)
input_tf_rolled = rolling_input(input_tf, self.x_dim, self.u_dim, rolling_window=self.rolling_window, H=x.shape[0], forward=self.forward_rolling)
self.jacobian_proj = tx.jacobian(input_tf_rolled, input_tf)
else:
input_tf_rolled = rolling_input(input_tf, self.x_dim, self.u_dim, rolling_window=self.rolling_window, H=x.shape[0], forward=self.forward_rolling)
with tf.GradientTape(persistent=False) as tx:
tx.watch(input_tf_rolled)
output_tf = self.model(input_tf_rolled)
pre_jac_tf = tx.jacobian(output_tf, input_tf_rolled)
jacobian_tf = tf.einsum("abcd,cdef->abef", pre_jac_tf, self.jacobian_proj)
jacobian_np = jacobian_tf.numpy().reshape(x.shape[0]*self.x_dim, (self.x_dim+self.u_dim)*(x.shape[0]+self.rolling_window-1))
reshape_indexer = sum([ list(np.arange(x.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(self.rolling_window-1 ,x.shape[0]+self.rolling_window-1) ], list()) + \
sum([ list( x.shape[1]+np.arange(u.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(self.rolling_window-1 ,x.shape[0]+self.rolling_window-1) ], list())
jacobian_np = np.take(jacobian_np, reshape_indexer, axis=1)
return jacobian_np
def jacobian_old(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_net = self._gather_input(x, u, p=p, tvp=tvp)
input_tf = tf.constant(input_net)
with tf.GradientTape(persistent=False) as tx:
tx.watch(input_tf)
input_tf_rolled = rolling_input(input_tf, self.x_dim, self.u_dim, rolling_window=self.rolling_window, H=x.shape[0], forward=self.forward_rolling)
output_tf = self.model(input_tf_rolled)
jacobian_tf = tx.jacobian(output_tf, input_tf)
jacobian_np = jacobian_tf.numpy().reshape(x.shape[0]*self.x_dim, (self.x_dim+self.u_dim)*(x.shape[0]+self.rolling_window-1))
reshape_indexer = sum([ list(np.arange(x.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(self.rolling_window-1 ,x.shape[0]+self.rolling_window-1) ], list()) + \
sum([ list( x.shape[1]+np.arange(u.shape[1])+i*(x.shape[1]+u.shape[1])) for i in range(self.rolling_window-1 ,x.shape[0]+self.rolling_window-1) ], list())
jacobian_np = np.take(jacobian_np, reshape_indexer, axis=1)
return jacobian_np
@tf.function
def _hessian_compute(self, input_tf):
H = tf.shape(input_tf)[0]
hessian_mask = tf.reshape(tf.eye(H*self.model.output_shape[-1],H*self.model.output_shape[-1]), (H*self.model.output_shape[-1],H,self.model.output_shape[-1]))
hessian_mask = tf.cast(hessian_mask, tf.float32)
output_tf = self.model(input_tf)
result = tf.map_fn(lambda mask : tf.hessians(output_tf*mask, input_tf)[0] , hessian_mask, dtype=tf.float32)
return result
def hessian(self, x: np.ndarray, u: np.ndarray, p=None, tvp=None):
input_np = self._gather_input_V2(x, u, p=p, tvp=tvp)
input_tf = tf.constant(input_np, dtype=tf.float32)
#if self.test is None:
# self.test = self._hessian_compute.get_concrete_function(input_tf=tf.TensorSpec([input_tf.shape[0], input_tf.shape[1]], tf.float64), output_shape=int(self.model.output_shape[-1]))
#hessian_np = self.test(input_tf, int(self.model.output_shape[-1])).numpy()
hessian_np = self._hessian_compute(input_tf).numpy()
        # TODO a better implementation could split the input BEFORE computing the hessian!
hessian_np = hessian_np.reshape(x.shape[0], x.shape[1], input_np.shape[1]*input_np.shape[0], input_np.shape[1]*input_np.shape[0])
        project_mat = np.zeros(shape=(input_np.shape[1]*x.shape[0], (self.x_dim+self.u_dim)*(x.shape[0]+self.rolling_window-1)))
import scipy.spatial as ssp
import numpy as np
import swarms.commons.utils as U
import shapely.geometry as sg
from swarms.base import Agent
class Evader(Agent):
def __init__(self, experiment):
super(Evader, self).__init__()
self.obs_radius = experiment.obs_radius
self.world_size = experiment.world_size
self.torus = experiment.torus
self.dynamics = 'direct'
self.max_speed = 2 * 10 # cm/s
if self.torus:
self.bounding_box = np.array([0., 2 * self.world_size, 0., 2 * self.world_size])
else:
self.bounding_box = np.array([0., self.world_size, 0., self.world_size])
self.action_callback = self.step
def reset(self, state):
self.state.p_pos = state
self.state.p_vel = np.zeros(2)
def step(self, agent, world):
if self.torus:
points_center = np.vstack([world.agent_states[:, 0:2], self.state.p_pos])
pursuers_down_right = np.hstack([world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2]])
pursuers_up_left = np.hstack([world.agent_states[:, 0:1], world.agent_states[:, 1:2] + world.world_size])
pursuers_up_right = np.hstack(
[world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2] + world.world_size])
evader_down_right = np.hstack([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2]])
evader_up_left = np.hstack([self.state.p_pos[0:1], self.state.p_pos[1:2] + world.world_size])
evader_up_right = np.hstack([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2] + world.world_size])
points_down_right = np.hstack([points_center[:, 0:1] + world.world_size, points_center[:, 1:2]])
points_up_left = np.hstack([points_center[:, 0:1], points_center[:, 1:2] + world.world_size])
points_up_right = np.hstack(
[points_center[:, 0:1] + world.world_size, points_center[:, 1:2] + world.world_size])
nodes = np.vstack([world.agent_states[:, 0:2],
pursuers_down_right,
pursuers_up_left,
pursuers_up_right,
self.state.p_pos,
evader_down_right,
evader_up_left,
evader_up_right])
dist_matrix_full = U.get_euclid_distances(nodes)
quadrant_check = np.sign(self.state.p_pos - world.world_size / 2)
if np.all(quadrant_check == np.array([1, 1])):
evader_quadrant = 0
elif np.all(quadrant_check == np.array([-1, 1])):
evader_quadrant = 1
elif np.all(quadrant_check == np.array([1, -1])):
evader_quadrant = 2
elif np.all(quadrant_check == np.array([-1, -1])):
evader_quadrant = 3
evader_dist = dist_matrix_full[:-4, -4 + evader_quadrant]
sub_list = list(np.where(evader_dist < self.obs_radius)[0])
if len(sub_list) > 10:
sub_list = list(np.argsort(evader_dist)[0:10])
sub_list.append(4 * world.nr_agents + evader_quadrant)
evader_sub = len(sub_list) - 1
closest_pursuer = np.where(evader_dist == evader_dist.min())[0]
nodes_center_sub = nodes[sub_list, :]
nodes_left = np.copy(nodes_center_sub)
nodes_left[:, 0] = self.bounding_box[0] - (nodes_left[:, 0] - self.bounding_box[0])
nodes_right = np.copy(nodes_center_sub)
nodes_right[:, 0] = self.bounding_box[1] + (self.bounding_box[1] - nodes_right[:, 0])
nodes_down = np.copy(nodes_center_sub)
nodes_down[:, 1] = self.bounding_box[2] - (nodes_down[:, 1] - self.bounding_box[2])
nodes_up = np.copy(nodes_center_sub)
nodes_up[:, 1] = self.bounding_box[3] + (self.bounding_box[3] - nodes_up[:, 1])
points = np.vstack([nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up])
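            # Note on the construction above (assumed intent): reflecting every node
            # across the four sides of the bounding box is a standard trick for a
            # bounded Voronoi diagram. A point (x, y) gets the mirror images
            # (2*xmin - x, y), (2*xmax - x, y), (x, 2*ymin - y) and (x, 2*ymax - y),
            # so each interior cell is clipped by its own reflections and cannot
            # extend past the box.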
else:
nodes = np.vstack([world.agent_states[:, 0:2],
self.state.p_pos,
])
distances = U.get_euclid_distances(nodes)
evader_dist = distances[-1, :-1]
closest_pursuer = np.where(evader_dist == evader_dist.min())[0]
            sub_list = list(np.where(evader_dist < self.obs_radius)[0])
import subprocess
import importlib

lib_list = ['numpy', 'csv', 'seaborn', 'matplotlib']
for lib_name in lib_list:
    try:
        # import by module name given as a string
        importlib.import_module(lib_name)
    except ImportError:
        print(lib_name, ' Module not installed')
        if lib_name == 'csv':
            subprocess.run(['pip', 'install', 'python-csv'])
        else:
            subprocess.run(['pip', 'install', '%s' % lib_name])
import numpy as np
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
################################################################################
###### Fetch the result data and plot out the PDF and mean+/-STD figures #######
################################################################################
# Function of reading data
def LumenRead(path,numfile):
resultlist = np.empty((0,39), float)
for i in range(numfile):
        filename = os.path.join(path, 'lumen_area_000{0:03d}.csv'.format(i))
reader = csv.reader(open(filename, "r"), delimiter='\t')
x = list(reader)
        result = np.array(x[0][:-1])
"""
Core functions to evaluate piecewise polynomial interpolants (or their
derivatives) in one dimension. Additional functions are provided that do
this for two such interpolants, to facilitate the interpolation of paired
variables like salinity and temperature.
This subpackage is ideally suited when interpolation to many different
evaluation sites is needed, such as when solving a nonlinear equation
involving the interpolants, because it pre-computes the piecewise polynomial
coefficients once, and then interpolation to a given evaluation site is fast.
However, if the interpolant needs to be evaluated just once or a few times,
the subpackage `interp1d` may be preferred.
"""
"""
Adapted from 'Piecewise Polynomial Calculus' available on the MATLAB
File Exchange at
https://mathworks.com/matlabcentral/fileexchange/73114-piecewise-polynomial-calculus
"""
import numpy as np
import numba as nb
@nb.njit
def ppval_i(dx, Yppc, i, d=0):
"""
Evaluate a single interpolant, knowing where the evaluation site lies.
Provides `i` such that `X[i] < x <= X[i+1]`
"""
# Evaluate polynomial, using nested multiplication.
# E.g. the cubic case is:
# y = dx^3 * Yppc[i,0] + dx^2 * Yppc[i,1] + dx * Yppc[i,2] + Yppc[i,3]
# = dx * (dx * (dx * Yppc[i,0] + Yppc[i,1]) + Yppc[i,2]) + Yppc[i,3]
y = 0.0
if d == 0:
for j in range(0, Yppc.shape[1]):
y = y * dx + Yppc[i, j]
else:
# Evaluate polynomial derivative, using nested multiplication.
# E.g. the second derivative of the cubic case is:
# y = 6 * dx * Yppc[i,0] + 2 * Yppc[i,1]
y = 0.0
degree = Yppc.shape[1] - 1
for o in range(0, degree - d + 1): # o for order
p = 1.0 # Build integer multiplying the coefficient
for a in range(degree - o - d + 1, degree - o + 1):
p *= a
# p = np.prod(np.arange(degree - o - d + 1, degree - o + 1)) # slow!
y = y * dx + Yppc[i, o] * p
return y
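# A minimal, self-contained sketch of ppval_i with assumed toy coefficients:
# evaluate the cubic p(dx) = 2*dx**3 - dx + 5 and its derivatives at dx = 2
# via the nested (Horner) multiplication implemented above.
def _demo_ppval_i():
    Yppc = np.array([[2.0, 0.0, -1.0, 5.0]])  # coefficients, highest power first
    assert ppval_i(2.0, Yppc, 0, 0) == 19.0  # 2*8 - 2 + 5
    assert ppval_i(2.0, Yppc, 0, 1) == 23.0  # p'(dx) = 6*dx**2 - 1
    assert ppval_i(2.0, Yppc, 0, 2) == 24.0  # p''(dx) = 12*dx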
@nb.njit
def ppval1(x, X, Yppc, d=0):
"""
Evaluate a single piecewise polynomial (PP).
Parameters
----------
x : float
Evaluation site
X : ndarray, 1d
Independent variable.
Yppc : ndarray, 2d
Piecewise Polynomial Coefficients. First dimension must match `len(X)`.
d : int, Default 0
Number of derivatives to take. If 0, simply evaluate the PP.
Returns
-------
y : float
The value of the PP (or its `d`'th derivative) at `X = x`.
"""
    if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
        return np.nan
import utils
import utils1
import utils2
import utils3
import utils4
import config
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.metrics import roc_curve, auc
# os.environ['CUDA_VISIBLE_DEVICES']='0'
# CUDA_VISIBLE_DEVICES=0
with open(os.path.join(config.datasetPath, "test.tsv"), "r", encoding='utf8') as f:
lines = f.readlines()
lines = [line.strip().split('\t') for line in lines]
scores0 = []
label0 = []
text0 = []
length0 = []
# for line in tqdm(lines[1:8606]):
for line in tqdm(lines[1:10000]):
vec0 = utils.sent2vec(line[0])
vec1 = utils.sent2vec(line[1])
score = np.dot(vec0, vec1.T)[0, 0]
scores0.append(score)
label0.append(int(line[2]))
text0.append((line[0], line[1]))
length0.append((len(line[0]) + len(line[1])) / 2)
scores0 = np.array(scores0)
label0 = np.array(label0).astype(int)
fpr, tpr, thresholds = roc_curve(label0, scores0)
roc_auc=auc(fpr,tpr)
print('distiluse-base-multilingual-cased-v1: ',roc_auc)
lw = 2
plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='distiluse-base-multilingual-cased-v1 (area = %0.6f)' % roc_auc)  # false positive rate on the x-axis, true positive rate on the y-axis
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC')
# plt.legend(loc="lower right")
# plt.show()
# plt.hist(scores[label == 0])
# plt.show()
# plt.hist(scores[label == 1])
# plt.show()
scores2 = []
label2 = []
text2 = []
for line in tqdm(lines[1:10000]):
vec0 = utils2.sent2vec(line[0])
vec1 = utils2.sent2vec(line[1])
score2 = np.dot(vec0, vec1.T)[0, 0]
scores2.append(score2)
label2.append(int(line[2]))
text2.append((line[0], line[1]))
scores2 = np.array(scores2)
label2 = np.array(label2).astype(int)
fpr, tpr, thresholds = roc_curve(label2, scores2)
roc_auc=auc(fpr,tpr)
print('distiluse-base-multilingual-cased-v2: ',roc_auc)
# plt.figure(0).clf()
lw = 2
# plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='blue',
         lw=lw, label='distiluse-base-multilingual-cased-v2 (area = %0.6f)' % roc_auc)  # false positive rate on the x-axis, true positive rate on the y-axis
scores3 = []
label3 = []
text3 = []
for line in tqdm(lines[1:10000]):
vec0 = utils3.sent2vec(line[0])
vec1 = utils3.sent2vec(line[1])
score3 = np.dot(vec0, vec1.T)[0, 0]
scores3.append(score3)
label3.append(int(line[2]))
text3.append((line[0], line[1]))
scores3 = np.array(scores3)
label3 = np.array(label3).astype(int)
fpr, tpr, thresholds = roc_curve(label3, scores3)
roc_auc=auc(fpr,tpr)
print('all-distilroberta-v1: ',roc_auc)
# plt.figure(0).clf()
lw = 2
# plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='green',
         lw=lw, label='all-distilroberta-v1 (area = %0.6f)' % roc_auc)  # false positive rate on the x-axis, true positive rate on the y-axis
scores1 = []
label1 = []
text1 = []
length1 = []
# for line in tqdm(lines[1:8606]):
for line in tqdm(lines[1:10000]):
vec0 = utils1.sent2vec(line[0])
vec1 = utils1.sent2vec(line[1])
score1 = np.dot(vec0, vec1.T)[0, 0]
scores1.append(score1)
label1.append(int(line[2]))
text1.append((line[0], line[1]))
length1.append((len(line[0]) + len(line[1])) / 2)
scores1 = np.array(scores1)
label1 = np.array(label1).astype(int)
fpr, tpr, thresholds = roc_curve(label1, scores1)
roc_auc=auc(fpr,tpr)
print('all-MiniLM-L6-v2: ',roc_auc)
# plt.figure(0).clf()
lw = 2
# plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='red',
         lw=lw, label='all-MiniLM-L6-v2 (area = %0.6f)' % roc_auc)  # false positive rate on the x-axis, true positive rate on the y-axis
# plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('ROC')
# plt.legend(loc="lower right")
# plt.show()
# plt.hist(scores1[label1 == 0])
# plt.show()
# plt.hist(scores1[label1 == 1])
# plt.show()
scores4 = []
label4 = []
text4 = []
for line in tqdm(lines[1:10000]):
vec0 = utils4.sent2vec(line[0])
vec1 = utils4.sent2vec(line[1])
    score4 = np.dot(vec0, vec1.T)[0, 0]
import numpy as np
import unittest
from unittest.mock import patch
from status import BallDirection, BallStatus, FieldStatus, LateralDirection, Possession
from matrix import HOME_TEAM_MOVING_STATUS, HOME_TEAM_GOAL_STATUS
from data import Skills, Team
from field import FIELD_CENTER_X, FIELD_CENTER_Y, FIELD_MAX_X, FIELD_MIN_X, Position
from game_matrix import GameMatrix
class TestGameMatrix(unittest.TestCase):
def setUp(self):
self.home_team = Team(
name = "AAA",
forwards = Skills(0.5, 0.5, 0.5),
mid_field = Skills(0.5, 0.5, 0.5),
backs = Skills(0.5, 0.5, 0.5),
ruck = Skills(0.5, 0.5, 0.5)
)
self.away_team = Team(
name = "BBB",
forwards = Skills(0.5, 0.5, 0.5),
mid_field = Skills(0.5, 0.5, 0.5),
backs = Skills(0.5, 0.5, 0.5),
ruck = Skills(0.5, 0.5, 0.5)
)
def set_numpy_side_effect(self, field_status, ball_direction, lateral_direction):
return [
            np.array([field_status]),
            np.array([ball_direction]),
            np.array([lateral_direction]),
        ]
import numpy as np
from numpy import exp
from scipy.special import gammaln
from ConfirmatoryLDA.utils import _dirichlet_expectation_2d, _dirichlet_expectation_1d
EPS = np.finfo(float).eps  # the np.float alias was removed from recent NumPy versions
class CLDA_VI:
def __init__(self, alpha, eta, K, eta_seed=None, eta_not_seed=None, seed_words=None,
confirmatory=None, evaluate_every=10):
self.alpha = alpha # hyperparameter; dimension: T * 1 but assume symmetric prior
if confirmatory:
self.eta_ordinary = eta # hyperparameter; dimension: M * 1 but assume symmetric prior
else:
self.eta = eta
self.eta_seed = eta_seed
self.eta_not_seed = eta_not_seed
self.seed_words = seed_words
self.K = K
self.evaluate_every = evaluate_every
self.confirmatory = confirmatory
self.perplexity = []
def _init_params(self, X, cv):
'''
Initialize parameters for LDA
This is variational free parameters each endowed to variational distribution
q(Z_{di} = k) ~ Multi(phi_{dwk})
q(theta_d) ~ Dir(gamma_d)
q(beta_k) ~ Dir(lambda_k)
'''
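        # Mean-field sketch of the family above, in standard LDA notation:
        #   q(z, \theta, \beta) = \prod_k q(\beta_k | \lambda_k)
        #                         \prod_d q(\theta_d | \gamma_d)
        #                         \prod_{d,i} q(z_{di} | \phi_{di})
        # and the updates coordinate-ascend the ELBO
        #   L = E_q[log p(w, z, \theta, \beta)] - E_q[log q].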
self.w2idx = cv.vocabulary_
self.idx2w = {val: key for key, val in self.w2idx.items()}
self.D, self.V = X.shape
self.Nd = [len(np.nonzero(X[doc, :])[0]) for doc in range(self.D)]
if self.confirmatory:
self.seed_word_index = []
# change words in seed_words dictionary to index
for key in self.seed_words.keys():
# filter seed words existing in corpus vocabulary
self.seed_words[key] = [i for i in self.seed_words[key] if i in list(self.w2idx.keys())]
self.seed_words[key] = [self.w2idx[i] for i in self.seed_words[key]]
self.seed_word_index += self.seed_words[key]
self.seed_word_index = list(set(self.seed_word_index))
            # make an asymmetric prior for the word-topic distribution
# different by each topic
            self.eta = self.eta_ordinary * np.ones((self.K, self.V))
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import sys, os
from unittest.mock import patch
sys.path.append(os.path.abspath("..")) # current folder is ~/tests
from idaes.core.surrogate.pysmo.polynomial_regression import (
PolynomialRegression,
FeatureScaling,
)
import numpy as np
import pandas as pd
import pytest
class TestFeatureScaling:
test_data_1d = [[x] for x in range(10)]
test_data_2d = [[x, (x + 1) ** 2] for x in range(10)]
test_data_3d = [[x, x + 10, (x + 1) ** 2 + x + 10] for x in range(10)]
test_data_3d_constant = [[x, 10, (x + 1) ** 2 + 10] for x in range(10)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9]])
expected_output_2 = np.array([[0]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(
output_1, np.array(expected_output_1).reshape(10, 1)
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 100]])
expected_output_2 = np.array([[0, 1]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 19, 119]])
expected_output_2 = np.array([[0, 10, 11]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 10, 110]])
expected_output_2 = np.array([[0, 10, 11]])
scale = expected_output_3 - expected_output_2
scale[scale == 0.0] = 1.0
expected_output_1 = (input_array - expected_output_2) / scale
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
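    # Illustrative note (a reading of the tests above): data_scaling performs plain
    # column-wise min-max scaling, x' = (x - min) / (max - min), and a degenerate
    # scale (max == min) is replaced by 1 so that a constant column, e.g.
    # [10, 10, ..., 10], maps to zeros instead of dividing by zero.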
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [list])
def test_data_scaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
with pytest.raises(TypeError):
FeatureScaling.data_scaling(input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
output_1 = output_1.reshape(
output_1.shape[0],
)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array.reshape(10, 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1]])
max_array = np.array([[5]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_06(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1, 2, 3]])
max_array = np.array([[5, 6, 7]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
class TestPolynomialRegression:
y = np.array(
[
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 21)
for j in np.linspace(0, 10, 21)
]
)
full_data = {"x1": y[:, 0], "x2": y[:, 1], "y": y[:, 2]}
training_data = [
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 5)
for j in np.linspace(0, 10, 5)
]
test_data = [[i, (i + 1) ** 2] for i in range(10)]
test_data_large = [[i, (i + 1) ** 2] for i in range(200)]
test_data_1d = [[(i + 1) ** 2] for i in range(10)]
test_data_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(10)]
sample_points = [[i, (i + 1) ** 2] for i in range(8)]
sample_points_large = [[i, (i + 1) ** 2] for i in range(100)]
sample_points_1d = [[(i + 1) ** 2] for i in range(8)]
sample_points_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(8)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
assert PolyClass.max_polynomial_order == 5
assert (
PolyClass.number_of_crossvalidations == 3
) # Default number of cross-validations
assert PolyClass.no_adaptive_samples == 4 # Default number of adaptive samples
assert PolyClass.fraction_training == 0.75 # Default training split
assert (
PolyClass.max_fraction_training_samples == 0.5
) # Default fraction for the maximum number of training samples
assert PolyClass.max_iter == 10 # Default maximum number of iterations
assert PolyClass.solution_method == "pyomo" # Default solution_method
assert PolyClass.multinomials == 1 # Default multinomials
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
@pytest.mark.unit
def test__init__02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
        assert PolyClass.max_polynomial_order == 3
        assert (
            PolyClass.number_of_crossvalidations == 5
        )  # User-specified number of cross-validations
        assert PolyClass.no_adaptive_samples == 6  # User-specified number of adaptive samples
        assert PolyClass.fraction_training == 0.5  # User-specified training split
        assert (
            PolyClass.max_fraction_training_samples == 0.4
        )  # User-specified fraction for the maximum number of training samples
        assert PolyClass.max_iter == 20  # User-specified maximum number of iterations
        assert (
            PolyClass.solution_method == "mle"
        )  # solution_method is stored lower-case, so "MLe" becomes "mle"
        assert PolyClass.multinomials == 0  # User-specified multinomials
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [list])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__03(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [list])
def test__init__04(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__05(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_large)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__06(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_3d)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__07(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_3d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__08(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_1d)
regression_data_input = array_type2(self.sample_points_1d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__09(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=11,
)
        assert (
            PolyClass.number_of_crossvalidations == 11
        )  # User-specified number of cross-validations (accepted with a warning)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__10(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1.2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__11(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_large)
regression_data_input = array_type2(self.sample_points_large)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=11
)
assert PolyClass.max_polynomial_order == 10
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__12(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__13(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=-1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__14(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__15(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=-1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__16(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
regression_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=100,
)
assert PolyClass.max_iter == 0
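        # max_iter is expected to be reset to 0 here because the "original"
        # and regression data sets are identical, so no adaptive samples can
        # be drawn beyond the supplied data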
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__17(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=0,
max_iter=100,
)
assert PolyClass.max_iter == 0
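        # with no_adaptive_samples=0 there is nothing to iterate over, so
        # max_iter is likewise expected to be reset to 0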
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__18(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__19(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__20(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__21(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=15
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__22(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__23(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="idaes",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__24(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
multinomials=3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__25(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=-2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__26(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__27(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
no_adaptive_samples=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__28(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
max_iter=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__29(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
overwrite=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__30(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname="solution.pkl",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__31(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__32(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
assert PolyClass1.filename == PolyClass2.filename
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__33(self, array_type1, array_type2):
file_name1 = "sol_check1.pickle"
file_name2 = "sol_check2.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name1,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name2,
overwrite=True,
)
assert PolyClass1.filename == file_name1
assert PolyClass2.filename == file_name2
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__34(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
        PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
        assert PolyClass1.filename == PolyClass2.filename
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.01,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.99,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
additional_data_input = np.array(
[
[
i**2,
((i + 1) * 2) + ((j + 1) * 2),
j**4,
((i + 1) * 2) + ((j + 1) ** 2),
]
for i in range(5)
for j in range(5)
]
)
training_data, cross_val_data = PolyClass.training_test_data_creation(
additional_features=additional_data_input
)
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations * 2
assert len(cross_val_data) == PolyClass.number_of_crossvalidations * 2
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert (
training_data["training_extras_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
assert (
cross_val_data["test_extras_" + str(i)].shape[0] == expected_test_size
)
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
concat_02 = np.concatenate(
(
training_data["training_extras_" + str(i)],
cross_val_data["test_extras_" + str(i)],
),
axis=0,
)
additional_data_sorted = additional_data_input[
np.lexsort(
(
additional_data_input[:, 3],
additional_data_input[:, 2],
additional_data_input[:, 1],
additional_data_input[:, 0],
)
)
]
concat_02_sorted = concat_02[
np.lexsort(
(concat_02[:, 3], concat_02[:, 2], concat_02[:, 1], concat_02[:, 0])
)
]
np.testing.assert_equal(additional_data_sorted, concat_02_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 4 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=2
)
poly_degree = 2
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 6 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=10
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 22 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
expected_output[:, 21] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
        np.testing.assert_equal(output_1, expected_output)
#!/usr/bin/python
"""
atom_energy_reporter.py
MIT License
Copyright (c) 2018
Weill Cornell Medicine, Memorial Sloan Kettering Cancer Center, and Authors
Authors:
<NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from openmmtools import testsystems
import mdtraj as md
import netCDF4
from netCDF4 import Dataset
import warnings
import time
# NOTE:
# - currently only the most common energy terms are implemented
# TODO:
# - implement AMOEBA forces
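# Example usage (a minimal sketch, not part of this module; 'system.pdb' and
# the force-field/integrator settings below are placeholder assumptions):
#
#     pdb = PDBFile('system.pdb')
#     forcefield = ForceField('amber99sb.xml', 'tip3p.xml')
#     system = forcefield.createSystem(pdb.topology)
#     integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
#     simulation = Simulation(pdb.topology, system, integrator)
#     simulation.context.setPositions(pdb.positions)
#     simulation.reporters.append(AtomEnergyReporter('energies.nc', reportInterval=100))
#     simulation.step(1000)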
class AtomEnergyReporter(object):
"""
AtomEnergyReporter outputs information about every atom in a simulation,
including the energy breakdown, to a file.
    To use it, create an AtomEnergyReporter, then add it to the Simulation's
    list of reporters. The data is written to a netCDF4 file.
    This module implements the algorithm developed by
    <NAME> and <NAME> at the University of Cambridge.
    It calculates Eq. 11 in the paper:
    $$
    u_{X_a} = \frac{1}{2}(u_{electrostatic} + u_{Lennard-Jones} + u_{bonded} + u_{Urey-Bradley})
            + \frac{1}{3} u_{angle}
            + \frac{1}{4} (u_{dihedral} + u_{improper})
    $$
    where each fraction reflects the number of atoms sharing the interaction
    (2-body, 3-body, and 4-body terms respectively).
    Further data analysis is needed.
ref:
https://pubs.acs.org/doi/abs/10.1021/acs.jctc.8b00027
"""
def __init__(self, file_path, reportInterval, idxs = None):
"""
        create an AtomEnergyReporter
        parameters
        ----------
        file_path : str
            the path of the netCDF4 file to write to
        reportInterval : int
            the interval (in steps) at which to write
        idxs : iterable of int, optional
            indices of the atoms to report on; if None, the atoms of the
            smallest molecule in the system are used
"""
self._reportInterval = reportInterval
self.idxs = idxs
self.force_map = {
'AmoebaAngleForce' : self.analyze_amoeba_angle_force,
'AmoebaBondForce' : self.analyze_amoeba_bond_force,
'AmoebaGeneralizedKirkwoodForce' : self.analyze_amoeba_generalized_kirkwood_force,
'AmoebaInPlaneAngleForce' : self.analyze_amoeba_in_plane_angle_force,
'AmoebaMultipoleForce' : self.analyze_amoeba_multipole_force,
'AmoebaOutOfPlaneBendForce' : self.analyze_amoeba_out_of_plane_bend_force,
'AmoebaPiTorsionForce' : self.analyze_amoeba_pi_torsion_force,
'AmoebaStretchBendForce' : self.analyze_amoeba_stretch_bend_force,
'AmoebaTorsionTorsionForce' : self.analyze_amoeba_torsion_torsion_force,
'AmoebaVdwForce' : self.analyze_amoeba_vdw_force,
'AmoebaWcaDispersionForce' : self.analyze_amoeba_wca_dispersion_force,
'AndersenThermostat' : self.analyze_andersen_thermostat,
'CMAPTorsionForce' : self.analyze_cmap_torsion_force,
'CMMotionRemover' : self.analyze_cmm_motion_remover,
'CustomAngleForce' : self.analyze_custom_angle_force,
'CustomBondForce' : self.analyze_custom_bond_force,
'CustomCVForce' : self.analyze_custom_cv_force,
'CustomCentroidBondForce' : self.analyze_centroid_bond_force,
'CustomCompoundBondForce' : self.analyze_custom_compound_bond_force,
'CustomExternalForce' : self.analyze_custom_external_force,
'CustomGBForce' : self.analyze_gb_force,
'CustomHbondForce' : self.analyze_hbond_force,
'CustomManyParticleForce' : self.analyze_custom_many_particle_force,
'CustomNonbondedForce' : self.analyze_custom_nonbonded_force,
'CustomTorsionForce' : self.analyze_custom_torsion_force,
'DrudeForce' : self.analyze_drude_force,
'GBSAOBCForce' : self.analyze_gbsaobc_force,
'GayBerneForce' : self.analyze_gay_berne_force,
'HarmonicAngleForce' : self.analyze_harmonic_angle_force,
'HarmonicBondForce' : self.analyze_harmonic_bond_force,
'MonteCarloAnisotropicBarostat' : self.analyze_monte_carlo_anisotropic_barostat,
'MonteCarloBarostat' : self.analyze_monte_carlo_barostat,
'MonteCarloMembraneBarostat' : self.analyze_monte_carlo_membrane_barostat,
'NonbondedForce' : self.analyze_nonbonded_force,
'PeriodicTorsionForce' : self.analyze_periodic_torsion_force,
'RBTorsionForce' : self.analyze_rb_torsion_force,
'RPMDMonteCarloBarostat' : self.analyze_rpmd_monte_carlo_barostat
}
# create a netCDF4 Dataset to record the energy
self._out = Dataset(file_path ,'w')
self._out.createDimension("time", None)
times = self._out.createVariable("time", "i8", ("time",))
times.unit = str(self._reportInterval)
self.time = 0
# let the analyzer register for once
self.registered = False
def describeNextReport(self, simulation):
"""
        adapted from:
openmm/wrappers/python/simtk/openmm/app/statedatareporter.py
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep%self._reportInterval
return (steps, True, False, True, True)
def report(self, simulation, state):
"""
generate a report
parameters
----------
simulation : an OpenMM simulation object
state : an OpenMM state object
"""
# find the small molecule to analyze
        if not self.registered:  # if the system is not registered, register it
            if self.idxs is None:
self.find_small_mol(simulation, state)
# set the attributes in Dataset
self._out.description = 'record of an OpenMM run'
            self._out.history = 'created ' + time.ctime(time.time())
# initialize the Dataset
self._out.createDimension("atom", len(self.idxs))
self._out.createVariable("atom", "i8", ("atom", ))
atoms_name = ["idx = %s; mass = %s" % (idx, simulation.system.getParticleMass(idx)) for idx in self.idxs]
self._out.setncattr('atoms_name', atoms_name)
# get the forces
self.forces = simulation.system.getForces()
self.force_idx_mapping = [force for force in self.forces]
forces_name = [force.__class__.__name__ for force in self.forces]
self._out.setncattr('forces_name', forces_name)
# create a force dimension, using idxs
# and initialize the forces
self._out.createDimension("force", len(self.forces))
self._out.createVariable("force", "i8", ("force", ))
# initialize the energy variable
# that stands on the dimensions of: time, atom, and force
self.energy_var = self._out.createVariable("energy", "f4", ("time", "atom", "force"))
self.energy_var.units = 'kJ/mol'
# keep a copy of all the positions
self._out.createDimension("xyz", 3)
self.pos_var = self._out.createVariable("pos", "f4", ("time", "atom", "xyz"))
# keep a copy of the parameters of atoms
param_array = np.zeros((len(self.idxs), 3))
for force in self.forces:
if force.__class__.__name__ == "NonbondedForce":
                for i, idx in enumerate(self.idxs):
                    charge, sigma, epsilon = force.getParticleParameters(idx)
                    # param_array rows follow the order of self.idxs
                    param_array[i, 0], param_array[i, 1], param_array[i, 2] = charge._value, sigma._value, epsilon._value
# note that the units here are: elementary charge, nanometer, kilojoule/mole
self._out.setncattr('param_array', param_array)
# set the registered flag to True,
# since you only need to do this once
self.registered = True
# point these objects to the class, and update them
self.simulation = simulation
self.state = state
# get the positions of the small molecules
self.pos = tuple([state.getPositions()[idx] for idx in self.idxs])
pos_matrix = np.array([state.getPositions(asNumpy=True)[idx]._value for idx in self.idxs])
self.pos_var[self.time, :, :] = pos_matrix
# analyze each force in the system
for force_idx, force in enumerate(self.force_idx_mapping):
energy_dict = self.get_energy(force)
            if energy_dict is None:
warnings.warn("no force information could be extracted from %s" % force.__class__.__name__)
continue
for atom_idx, energy in energy_dict.items():
self.energy_var[self.time, atom_idx, force_idx] = energy._value
# note that the unit here is kilojoule/mole
# increase the time dimension by one
self.time += 1
def find_small_mol(self, simulation, state):
"""
find the atoms of the smallest molecule, which is most likely to be
        the region of greatest interest in a simulation
parameters
----------
simulation : an OpenMM Simulation object
state : an OpenMM State object
returns
-------
        atoms : a list of indices of the atoms that belong to the small molecule
"""
context = simulation.context
mols = context.getMolecules()
small_mol = sorted([mol for mol in mols if len(mol) > 4],
key = lambda mol : len(mol), reverse = False)[0]
# register the atoms and idxs in the class
self.idxs = small_mol
return small_mol
def get_energy(self, force):
"""
        analyzes a force and returns its per-atom energies;
        more specifically, matches the force with its analysis function
"""
name = str(force.__class__.__name__) # get the name of the force
energy_dict = self.force_map[name](force) # map the force to its specific analyze function and get the energy
return energy_dict
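    # For example, a HarmonicBondForce instance is dispatched via force_map to
    # self.analyze_harmonic_bond_force; each analyze_* method is expected to
    # return a dict mapping atom index -> energy (an OpenMM Quantity).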
#################################################
    # helper functions to calculate distances, angles,
    # and dihedral angles from positions of atoms
#################################################
def dist(self, atom0, atom1):
"""
calculate the distance between two atoms
require that self.pos is defined
parameters
----------
atom0 : the idx of the first atom
atom1 : the idx of the second atom
returns
-------
dist : a float representing the distance between the two atoms
"""
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
dist = np.linalg.norm(pos0 - pos1)
return dist
def angle(self, center_atom, atom0, atom1):
"""
calculate the angle between bond:
center_atom -- atom0
and
center_atom -- atom1
$ cos(<v0, v1>) = (v0 \dot v1) / |v0||v1| $
parameters
----------
center_atom : the idx of the center atom
atom0 : the idx of the first atom involved in the angle
atom1 : the idx of the second atom involved in the angle
returns
-------
angle : the value of the angle in rads
"""
# get all the positions
pos_center = self.pos[center_atom]
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
# express the distance in vectors
v0 = np.array(pos0) - np.array(pos_center)
v1 = np.array(pos1) - np.array(pos_center)
# to calculate:
# $ cos(<v0, v1>) = (v0 \dot v1) / |v0||v1| $
v0_dot_v1 = np.dot(v0, v1)
v0_norm = np.linalg.norm(v0)
        v1_norm = np.linalg.norm(v1)
        angle = np.arccos(v0_dot_v1 / (v0_norm * v1_norm))
        return angle
import numpy
from .geometry import (
spatial,
area as _area,
centroid as _centroid,
contains as _contains,
bbox as _bbox,
prepare_hull as _prepare_hull,
HULL_TYPES,
)
# ------------------------------------------------------------ #
# Utilities #
# ------------------------------------------------------------ #
def parse_size_and_intensity(hull, intensity=None, size=None):
"""
Given a hull, an intensity, and a size int/tuple, correctly
compute the resulting missing quantities. Defaults to 100 points in one
replication, meaning the intensity will be computed on the fly
if nothing is provided.
Parameters
----------
hull : A geometry-like object
This encodes the "space" in which to simulate the normal pattern. All points will
lie within this hull. Supported values are:
- a bounding box encoded in a numpy array as numpy.array([xmin, ymin, xmax, ymax])
- an (N,2) array of points for which the bounding box will be computed & used
- a shapely polygon/multipolygon
- a pygeos geometry
        - a scipy convex hull
intensity : float
the number of observations per unit area in the hull to use. If provided,
then the number of observations is determined using the intensity * area(hull) and
the size is assumed to represent n_replications (if provided).
size : tuple or int
a tuple of (n_observations, n_replications), where the first number is the number
of points to simulate in each replication and the second number is the number of
total replications. So, (10, 4) indicates 10 points, 4 times.
If an integer is provided and intensity is None, n_replications is assumed to be 1.
If size is an integer and intensity is also provided, then size indicates n_replications,
and the number of observations is computed on the fly using intensity and area.
"""
if size is None:
if intensity is not None:
# if intensity is provided, assume
# n_observations
n_observations = int(_area(hull) * intensity)
else:
# default to 100 points
n_observations = 100
intensity = n_observations / _area(hull)
n_simulations = 1
size = (n_observations, n_simulations)
elif isinstance(size, tuple):
if len(size) == 2 and intensity is None:
n_observations, n_simulations = size
intensity = n_observations / _area(hull)
elif len(size) == 2 and intensity is not None:
raise ValueError(
"Either intensity or size as (n observations, n simulations)"
" can be provided. Providing both creates statistical conflicts."
" between the requested intensity and implied intensity by"
" the number of observations and the area of the hull. If"
" you want to specify the intensity, use the intensity argument"
" and set size equal to the number of simulations."
)
else:
raise ValueError(
f"Intensity and size not understood. Provide size as a tuple"
f" containing (number of observations, number of simulations)"
f" with no specified intensity, or an intensity and size equal"
f" to the number of simulations."
f" Recieved: `intensity={intensity}, size={size}`"
)
elif isinstance(size, int):
# assume int size with specified intensity means n_simulations at x intensity
if intensity is not None:
n_observations = int(intensity * _area(hull))
n_simulations = size
else: # assume we have one replication at the specified number of points
n_simulations = 1
n_observations = size
intensity = n_observations / _area(hull)
else:
raise ValueError(
f"Intensity and size not understood. Provide size as a tuple"
f" containing (number of observations, number of simulations)"
f" with no specified intensity, or an intensity and size equal"
f" to the number of simulations."
f" Recieved: `intensity={intensity}, size={size}`"
)
return (n_observations, n_simulations, intensity)
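# A quick illustration of the contract above (a sketch, assuming a hull of
# unit area so that intensity == n_observations):
#   parse_size_and_intensity(hull, size=(10, 4))          -> (10, 4, 10.0)
#   parse_size_and_intensity(hull, intensity=50, size=4)  -> (50, 4, 50)
#   parse_size_and_intensity(hull, size=25)               -> (25, 1, 25.0)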
# ------------------------------------------------------------ #
# Distributions #
# ------------------------------------------------------------ #
def poisson(hull, intensity=None, size=None):
"""
Simulate a poisson random point process with a specified intensity.
Parameters
----------
hull : A geometry-like object
This encodes the "space" in which to simulate the normal pattern. All points will
lie within this hull. Supported values are:
- a bounding box encoded in a numpy array as numpy.array([xmin, ymin, xmax, ymax])
- an (N,2) array of points for which the bounding box will be computed & used
- a shapely polygon/multipolygon
- a pygeos geometry
        - a scipy convex hull
intensity : float
the number of observations per unit area in the hull to use. If provided, then
size must be an integer describing the number of replications to use.
size : tuple or int
a tuple of (n_observations, n_replications), where the first number is the number
of points to simulate in each replication and the second number is the number of
total replications. So, (10, 4) indicates 10 points, 4 times.
If an integer is provided and intensity is None, n_replications is assumed to be 1.
If size is an integer and intensity is also provided, then size indicates n_replications,
and the number of observations is computed from the intensity.
Returns
--------
: numpy.ndarray
either an (n_replications, n_observations, 2) or (n_observations,2) array containing
the simulated realizations.
"""
    # keep a ready-made (4,) bounding box as-is; everything else (point
    # arrays, shapely/pygeos geometries, convex hulls) goes through
    # _prepare_hull
    if not (isinstance(hull, numpy.ndarray) and hull.shape == (4,)):
        hull = _prepare_hull(hull)
n_observations, n_simulations, intensity = parse_size_and_intensity(
hull, intensity=intensity, size=size
)
result = numpy.empty((n_simulations, n_observations, 2))
bbox = _bbox(hull)
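    # rejection sampling: draw uniformly within the bounding box and keep
    # only the draws that fall inside the hull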
for i_replication in range(n_simulations):
generating = True
i_observation = 0
while i_observation < n_observations:
x, y = (
numpy.random.uniform(bbox[0], bbox[2]),
numpy.random.uniform(bbox[1], bbox[3]),
)
if _contains(hull, x, y):
result[i_replication, i_observation] = (x, y)
i_observation += 1
return result.squeeze()
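# Example (a sketch): 100 points in a single realization inside the unit
# square, with the hull given as a [xmin, ymin, xmax, ymax] bounding box.
#   points = poisson(numpy.array([0.0, 0.0, 1.0, 1.0]), size=100)
#   points.shape  # -> (100, 2) after the final .squeeze()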
def normal(hull, center=None, cov=None, size=None):
"""
Simulate a multivariate random normal point cluster
Parameters
----------
hull : A geometry-like object
This encodes the "space" in which to simulate the normal pattern. All points will
lie within this hull. Supported values are:
- a bounding box encoded in a numpy array as numpy.array([xmin, ymin, xmax, ymax])
- an (N,2) array of points for which the bounding box will be computed & used
- a shapely polygon/multipolygon
- a pygeos geometry
        - a scipy convex hull
center : iterable of shape (2, )
A point where the simulations will be centered.
cov : float or a numpy array of shape (2,2)
either the standard deviation of an independent and identically distributed
normal distribution, or a 2 by 2 covariance matrix expressing the covariance
of the x and y for the distribution. Default is half of the width or height
of the hull's bounding box, whichever is larger.
size : tuple or int
a tuple of (n_observations, n_replications), where the first number is the number
of points to simulate in each replication and the second number is the number of
total replications. So, (10, 4) indicates 10 points, 4 times.
If an integer is provided, n_replications is assumed to be 1.
Returns
--------
: numpy.ndarray
either an (n_replications, n_observations, 2) or (n_observations,2) array containing
the simulated realizations.
"""
    # keep a ready-made (4,) bounding box as-is; everything else (point
    # arrays, shapely/pygeos geometries, convex hulls) goes through
    # _prepare_hull
    if not (isinstance(hull, numpy.ndarray) and hull.shape == (4,)):
        hull = _prepare_hull(hull)
if center is None:
center = _centroid(hull)
n_observations, n_simulations, intensity = parse_size_and_intensity(
hull, intensity=None, size=size
)
if cov is None:
bbox = _bbox(hull)
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
cov = numpy.maximum(width / 2, height / 2) ** 2
if isinstance(cov, (int, float)):
sd = cov
cov = numpy.eye(2) * sd
    elif isinstance(cov, numpy.ndarray):
if cov.ndim == 2:
assert cov.shape == (2, 2), "Bivariate covariance matrices must be 2 by 2"
elif cov.ndim == 3:
assert cov.shape[1:] == (
2,
2,
), "3-dimensional covariance matrices must have shape (n_simulations, 2,2)"
assert (
cov.shape[0] == n_simulations
), "3-dimensional covariance matrices must have shape (n_simulations, 2,2)"
else:
raise ValueError(
"`cov` argument must be a float (signifying a standard deviation)"
" or a 2 by 2 array expressing the covariance matrix of the "
" bivariate normal distribution."
)
result = numpy.empty((n_simulations, n_observations, 2))
bbox = _bbox(hull)
for i_replication in range(n_simulations):
generating = True
i_observation = 0
        replication_cov = cov[i_replication] if cov.ndim == 3 else cov
        replication_sd = numpy.diagonal(replication_cov) ** 0.5
        # correlation matrix: D^-1 @ cov @ D^-1 with D = diag(sd)
        replication_cor = replication_cov / numpy.outer(replication_sd, replication_sd)
while i_observation < n_observations:
candidate = numpy.random.multivariate_normal((0, 0), replication_cor)
x, y = center + candidate * replication_sd
if _contains(hull, x, y):
result[i_replication, i_observation] = (x, y)
i_observation += 1
return result.squeeze()
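# Example (a sketch): a cluster of 50 points centered in the unit square,
# using the scalar `cov` form accepted above.
#   pts = normal(numpy.array([0.0, 0.0, 1.0, 1.0]), center=(0.5, 0.5),
#                cov=0.1, size=50)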
def cluster_poisson(
hull, intensity=None, size=None, n_seeds=2, cluster_radius=None,
):
"""
Simulate a cluster poisson random point process with a specified intensity & number of seeds.
A cluster poisson process is a poisson process where the center of each "cluster" is
itself distributed according to a spatial poisson process.
Parameters
----------
hull : A geometry-like object
This encodes the "space" in which to simulate the normal pattern. All points will
lie within this hull. Supported values are:
- a bounding box encoded in a numpy array as numpy.array([xmin, ymin, xmax, ymax])
- an (N,2) array of points for which the bounding box will be computed & used
- a shapely polygon/multipolygon
- a pygeos geometry
        - a scipy convex hull
intensity : float
the number of observations per unit area in the hull to use. If provided, then
size must be an integer describing the number of replications to use.
size : tuple or int
a tuple of (n_observations, n_replications), where the first number is the number
of points to simulate in each replication and the second number is the number of
total replications. So, (10, 4) indicates 10 points, 4 times.
If an integer is provided and intensity is None, n_replications is assumed to be 1.
If size is an integer and intensity is also provided, then size indicates n_replications,
and the number of observations is computed from the intensity.
n_seeds : int
the number of sub-clusters to use.
cluster_radius : float or iterable
the radius of each cluster. If a float, the same radius is used for all clusters.
If an array, then there must be the same number of radii as clusters.
If None, 50% of the minimum inter-point distance is used, which may fluctuate across
replications.
Returns
--------
: numpy.ndarray
either an (n_replications, n_observations, 2) or (n_observations,2) array containing
the simulated realizations.
"""
    # keep a ready-made (4,) bounding box as-is; everything else (point
    # arrays, shapely/pygeos geometries, convex hulls) goes through
    # _prepare_hull
    if not (isinstance(hull, numpy.ndarray) and hull.shape == (4,)):
        hull = _prepare_hull(hull)
if isinstance(cluster_radius, numpy.ndarray):
cluster_radii = cluster_radius.flatten()
assert len(cluster_radii) == n_seeds, (
f"number of radii provided ({len(cluster_radii)})"
f"does not match number of clusters requested"
f" ({n_seeds})."
)
elif isinstance(cluster_radius, (int, float)):
cluster_radii = [cluster_radius] * n_seeds
n_observations, n_simulations, intensity = parse_size_and_intensity(
hull, intensity=intensity, size=size
)
    result = numpy.empty((n_simulations, n_observations, 2))
import numpy
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
import torch
import configparser
from all_parameters_sentiment import get_all_parameters
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from workspace_cls_kw import SENT_WORDID, SENT_LABELID, SENT_WORD_MASK, SENT_ORIGINAL_TXT, KWS_IDS, KWS_IDF
from torch.utils.data import Dataset, DataLoader, RandomSampler, SubsetRandomSampler
import argparse
from utils_torch_cls_kw import compute_values, get_data, compute_values_eval
from experiment_imax_kw_sentiment import RunExperiment
from workspace_cls_kw import workspace
from model_imax_kw_sentiment import *
from vocabulary_cls import get_word_info
import math
import random
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import nltk
nltk.data.path.append("/content/nltk_data/")
import re
import string
from gensim.models.word2vec import Word2Vec, LineSentence
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models.keyedvectors import KeyedVectors
import fasttext
import fasttext.util
from gensim.models.fasttext import load_facebook_model
from gensim.models.fasttext import FastText as FT_gensim
from gensim.test.utils import datapath
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from random import shuffle
from wikipedia2vec import Wikipedia2Vec
from pprint import pprint
from copy import deepcopy
import time
from datetime import datetime, timedelta
from gensim import utils, matutils
# NLTK Stop words
from gensim.utils import simple_preprocess
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import _pickle as cPickle
def read_pickle(filepath, filename):
f = open(os.path.join(filepath, filename), 'rb')
read_file = cPickle.load(f)
f.close()
return read_file
def save_pickle(filepath, filename, data):
f = open(os.path.join(filepath, filename), 'wb')
cPickle.dump(data, f)
print(" file saved to: %s"%(os.path.join(filepath, filename)))
f.close()
def get_stop_words(stop_file_path):
"""load stop words """
with open(stop_file_path, 'r', encoding="utf-8") as f:
stopwords = f.readlines()
stop_set = set(m.strip() for m in stopwords)
return frozenset(stop_set)
def cleaning_text(txt):
punct = ''.join([p for p in string.punctuation])
txt = txt.replace('i.e.', 'id est')
txt = txt.replace('e.g.', 'exempli gratia')
txt = txt.lower().replace('q&a', 'question and answer')
txt = txt.replace('&', 'and')
txt = re.sub(r'@\w+', '', txt)
txt = re.sub(r'[-+]?[.\d]*[\d]+[:,.\d]*', '', txt)
txt = re.sub(r'[^\x00-\x7f]', '', txt)
txt = re.sub(r'\b\w{1}\b', '', txt)
    txt = re.sub(r'\b\w{20,1000}\b', '', txt)  # drop absurdly long tokens
regex = re.compile('[%s]' % re.escape(punct))
txt = regex.sub(' ', txt)
txt = ' '.join(txt.split())
return txt
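# Illustrative example (traced by hand, so treat as approximate):
#   cleaning_text('Q&A @user 3.14 e.g. hello!!')
#   -> 'question and answer exempli gratia hello'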
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
#use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
for idx, score in sorted_items:
fname = feature_names[idx]
#keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
#create a tuples of feature,score
#results = zip(feature_vals,score_vals)
results= {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]]=score_vals[idx]
return results
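# Example sketch (with a hypothetical `corpus` list of strings): fit the
# vectorizers, then pull the top-5 keywords of one document.
#   cv = CountVectorizer(stop_words='english')
#   tfidf_transformer = TfidfTransformer().fit(cv.fit_transform(corpus))
#   vec = tfidf_transformer.transform(cv.transform([corpus[0]]))
#   top = extract_topn_from_vector(cv.get_feature_names(),
#                                  sort_coo(vec.tocoo()), topn=5)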
def get_amazondata():
    xdat = []
    PATH = '/content/protoinfomax/data/AmazonDat/'
    domains = [
        'Apps_for_Android', 'Beauty', 'Books', 'CDs_and_Vinyl',
        'Cell_Phones_and_Accessories', 'Clothing_Shoes_and_Jewelry',
        'Electronics', 'Health_and_Personal_Care', 'Home_and_Kitchen',
        'Kindle_Store', 'Movies_and_TV', 'Office_Products',
        'Sports_and_Outdoors',
    ]
    for domain in domains:
        print("Processing %s ..." % domain)
        sys.stdout.flush()
        with open(PATH + 'train/%s.train' % domain) as f:
            for line in f:
                xdat.append(line.split('\t')[0])
    clean_xdat = [cleaning_text(txt) for txt in xdat]
    return clean_xdat
def get_intentdata():
    xdat = []
    PATH = '/content/protoinfomax/data/IntentDat/'
    for domain in ['Assistant', 'Atis', 'Finance', 'Stackoverflow']:
        print("Processing %s ..." % domain)
        sys.stdout.flush()
        with open(PATH + 'train/%s.train' % domain) as f:
            for line in f:
                xdat.append(line.split('\t')[0])
    clean_xdat = [cleaning_text(txt) for txt in xdat]
    return clean_xdat
def finetuning_amazon(data):
    print("Training Word Embeddings on Amazon data set...")
    model = load_facebook_model('/content/protoinfomax/embeddings/cc.en.100.bin')
    oldmodel = deepcopy(model)
    data = [t.split() for t in data]
    n_sents = len(data)
    model.build_vocab(data, update=True)
    model.train(data, total_examples=n_sents, epochs=20)
    model.save('/content/protoinfomax/embeddings/w2v_fasttext_sentiment.model')
    for name, m in [('oldmodel', oldmodel), ('model', model)]:
        print('The vocabulary size of the w2v_fasttext_cls', name, 'is', len(m.wv.vocab))
    sys.stdout.flush()
def finetuning_intent(data):
    print("Training Word Embeddings on Intent data set...")
    model = load_facebook_model('/content/protoinfomax/embeddings/cc.en.100.bin')
    oldmodel = deepcopy(model)
    data = [t.split() for t in data]
    n_sents = len(data)
    model.build_vocab(data, update=True)
    model.train(data, total_examples=n_sents, epochs=20)
    model.save('/content/protoinfomax/embeddings/w2v_fasttext_intent.model')
    for name, m in [('oldmodel', oldmodel), ('model', model)]:
        print('The vocabulary size of the w2v_fasttext_cls', name, 'is', len(m.wv.vocab))
    sys.stdout.flush()
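
# Example usage (a sketch, assuming the Colab paths above exist):
#
#   amazon_text = get_amazondata()
#   finetuning_amazon(amazon_text)
#
#   intent_text = get_intentdata()
#   finetuning_intent(intent_text)
#
# Each call fine-tunes the pretrained fastText vectors on the in-domain
# corpus and saves the updated model next to the original .bin file.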
# scripts for loading pretrained word embedding models
def _load_w2v(model_path):
    """Load a fine-tuned model and prepend padding/unknown tokens.

    Index 0 is reserved for the padding token '</s>' and index 1 for the
    unknown token '<unk>', both initialised with zero vectors; any
    pre-existing '</s>'/'<unk>' entries are renamed so they cannot clash.
    """
    zeros_init = [0.] * 100
    model = Word2Vec.load(model_path)
    vocab = list(model.wv.vocab)          # gensim 3.x API
    word_vecs = model.wv.vectors.tolist()
    if '</s>' in vocab:
        vocab[vocab.index('</s>')] = '</s2>'
    if '<unk>' in vocab:
        vocab[vocab.index('<unk>')] = '<unk2>'
    vocab_new = ['</s>', '<unk>'] + vocab                  # indices 0 and 1
    word_vecs_new = [zeros_init, zeros_init] + word_vecs
    return vocab_new, np.array(word_vecs_new)

def load_w2v_sentiment():
    return _load_w2v('/content/protoinfomax/embeddings/w2v_fasttext_sentiment.model')

def load_w2v_intent():
    return _load_w2v('/content/protoinfomax/embeddings/w2v_fasttext_intent.model')
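
# Both loaders return index-aligned lists: vocab[i] labels vecs[i], with the
# padding token '</s>' at index 0 and '<unk>' at index 1. A word -> index map
# of the kind extract_kws below expects in params['vocabulary'] can be built
# with a sketch like:
#
#   vocab, vecs = load_w2v_sentiment()
#   word2idx = {w: i for i, w in enumerate(vocab)}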
def get_per_domain(PATH, domain):
    # e.g. domain = 'Apps_for_Android.train'
    xdat = []
    ydat = []
    print("Processing domain ...", domain)
    sys.stdout.flush()
    with open(PATH + domain) as f:
        for line in f:
            fields = line.split('\t')
            xdat.append(fields[0])
            ydat.append(fields[1][0])   # first character of the label field
    clean_xdat = [cleaning_text(txt) for txt in xdat]
    return clean_xdat, ydat
def extract_kws(PATH, domain, params):
    cv = params['cv']
    tfidf_transformer = params['tfidf_transformer']
    word2idx = params['vocabulary']
    UNKNOWN_WORD_INDEX = word2idx['<unk>']
    PAD = word2idx['</s>']
    text, y = get_per_domain(PATH, domain)
    print("Extracting keywords ...")
    sys.stdout.flush()
    # note: newer scikit-learn versions rename this to get_feature_names_out()
    feature_names = cv.get_feature_names()
    tf_idf_vector = tfidf_transformer.transform(cv.transform(text))
    results_keywords_id = []
    results_vals = []
    results_kws = []
    for i in range(tf_idf_vector.shape[0]):
        # tf-idf vector of a single document, sorted by descending score
        curr_vector = tf_idf_vector[i]
        sorted_items = sort_coo(curr_vector.tocoo())
        # extract only the top n keywords; n here is 10
        keywords = extract_topn_from_vector(feature_names, sorted_items, 10)
        kws = list(keywords.keys())
        vals = list(keywords.values())
        keywords_id = [word2idx.get(kw, UNKNOWN_WORD_INDEX) for kw in kws]
        # pad short keyword lists up to length 10
        if len(keywords_id) < 10:
            pad_len = 10 - len(keywords_id)
            keywords_id.extend([PAD] * pad_len)
            vals.extend([0.] * pad_len)
        results_keywords_id.append(keywords_id)
        results_vals.append(vals)
        results_kws.append(kws)
    df = pd.DataFrame(zip(text, y, results_keywords_id, results_vals, results_kws),
                      columns=['doc', 'label', 'keywords_id', 'vals', 'kws'])
    fn = os.path.splitext(os.path.basename(PATH + domain))[0]
    np.savetxt('/content/protoinfomax/data/AmazonDat/train/Kws_%s.train' % fn,
               df.values, fmt='%s', delimiter='\t')
# NOTE: keyword files are always written under AmazonDat/train/ (see above)
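
# Example (a sketch; assumes params already holds a fitted CountVectorizer
# under 'cv', a fitted TfidfTransformer under 'tfidf_transformer' and the
# word -> index vocabulary under 'vocabulary'):
#
#   extract_kws('/content/protoinfomax/data/AmazonDat/train/',
#               'Apps_for_Android.train', params)
#
# This writes Kws_Apps_for_Android.train with one tab-separated row of
# (doc, label, keywords_id, vals, kws) per document.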
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Extracting keywords for ProtoInfoMax++ from the sentiment dataset ...")
    parser.add_argument('-config', help="path to configuration file",
                        default="./config")
    parser.add_argument('-section', help="the section name of the experiment")
    args = parser.parse_args()

    config_paths = [args.config]
    # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12
    config_parser = configparser.ConfigParser()
    config_found = config_parser.read(config_paths)

    params = get_all_parameters(config_parser, args.section)
    params['model_string'] = args.section
    np.random.seed(params['seed'])
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
        hkls = N.dot(self.transposed_rotations, hkl)
        # the componentwise product of phase factors raised to hkl gives
        # exp(-2j*pi*t.hkl) for each transformation's translation vector t
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p
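
# Example (a sketch, using the dictionary populated below):
#
#   sg = space_groups['P 21 21 21']
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   assert len(hkls) == len(sg)   # one reflection per symmetry operation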
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
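# Every space group below follows the same generated pattern: collect the
# (rotation, translation numerator, translation denominator) triples,
# construct the SpaceGroup, then register it under both its number and its
# Hermann-Mauguin symbol.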
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
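# Space group 47 (P m m m): orthorhombic, primitive, point group mmm; 8 operations.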
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
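# Space group 48 (P n n n, origin choice 2): orthorhombic, primitive, point group mmm; 8 operations.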
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
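# Space group 49 (P c c m): orthorhombic, primitive, point group mmm; 8 operations.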
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
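# Space group 50 (P b a n, origin choice 2): orthorhombic, primitive, point group mmm; 8 operations.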
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
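# Space group 51 (P m m a): orthorhombic, primitive, point group mmm; 8 operations.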
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
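# Space group 52 (P n n a): orthorhombic, primitive, point group mmm; 8 operations.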
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
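# Space group 53 (P m n a): orthorhombic, primitive, point group mmm; 8 operations.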
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
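# Space group 54 (P c c a): orthorhombic, primitive, point group mmm; 8 operations.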
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
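# Space group 55 (P b a m): orthorhombic, primitive, point group mmm; 8 operations.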
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
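# Space group 56 (P c c n): orthorhombic, primitive, point group mmm; 8 operations.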
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
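# Space group 57 (P b c m): orthorhombic, primitive, point group mmm; 8 operations.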
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
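# Space group 58 (P n n m): orthorhombic, primitive, point group mmm; 8 operations.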
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
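# Space group 59 (P m m n, origin choice 2): orthorhombic, primitive, point group mmm; 8 operations.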
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
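# Space group 60 (P b c n): orthorhombic, primitive, point group mmm; 8 operations.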
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
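# Space group 61 (P b c a): orthorhombic, primitive, point group mmm; 8 operations.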
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
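# Space group 62 (P n m a): orthorhombic, primitive, point group mmm; 8 operations.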
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
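# Space group 63 (C m c m): orthorhombic, C-centered, point group mmm; 16 operations.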
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
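# Space group 64 (C m c a): orthorhombic, C-centered, point group mmm; 16 operations.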
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
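# Space group 65 (C m m m): orthorhombic, C-centered, point group mmm; 16 operations.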
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
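# Space group 66 (C c c m): orthorhombic, C-centered, point group mmm; 16 operations.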
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
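# Space group 67 (C m m a): orthorhombic, C-centered, point group mmm; 16 operations.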
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
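# Space group 68 (C c c a, origin choice 2): orthorhombic, C-centered, point group mmm; 16 operations.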
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
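# Space group 69 (F m m m): orthorhombic, face-centered, point group mmm; 32 operations.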
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
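# Space group 70 (F d d d, origin choice 2): orthorhombic, face-centered, point group mmm; 32 operations.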
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
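# Space group 71 (I m m m): orthorhombic, body-centered, point group mmm; 16 operations.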
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
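# Space group 72 (I b a m): orthorhombic, body-centered, point group mmm; 16 operations.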
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
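# Space groups 75-80 (P 4 ... I 41) open the tetragonal system: point
# group 4, a single fourfold axis along z.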
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
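# P 43 (No. 78, next block) is the enantiomorph of P 41 (No. 76): the
# screw translation along z is 3/4 instead of 1/4.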
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
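# The next four operations repeat the four above with the body-centring
# translation (1/2, 1/2, 1/2) added (I lattice).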
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
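# Space groups 81-82 (P -4, I -4): point group -4; the fourfold
# rotoinversion appears as a rotation matrix that also inverts z.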
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
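# Space groups 83-88: point group 4/m. The second half of each operation
# list holds the inversion-related copies (rotation and translation negated).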
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
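# For the n- and a-glide groups below, the ':2' suffix marks origin
# choice 2 of the International Tables (origin at the inversion centre).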
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
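# Space groups 89-98: point group 422, adding twofold axes perpendicular
# to the fourfold axis (rotations that invert z).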
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
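# In P 4 21 2 (next block) the 2_1 screw axes appear as the (1/2, 1/2, 0)
# translation attached to the fourfold and axial twofold operations.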
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
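# Space groups 95 and 96 (P 43 2 2, P 43 21 2) are the enantiomorphic
# partners of 91 and 92 (4_3 in place of 4_1).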
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
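# Space groups 99-110: point group 4mm; mirror and glide planes enter as
# determinant -1 matrices that leave z unchanged.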
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
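# Space group 109: I 41 m d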
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
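# Space group 110: I 41 c d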
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
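# Space group 111: P -4 2 m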
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
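# Space group 112: P -4 2 c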
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
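# Space group 113: P -4 21 m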
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
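# Space group 114: P -4 21 c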
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
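# Space group 115: P -4 m 2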
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
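# Space group 116: P -4 c 2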
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
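# Space group 117: P -4 b 2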
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
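# Space group 118: P -4 n 2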
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
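# Space group 119: I -4 m 2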
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
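# Space group 120: I -4 c 2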
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
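# Space group 121: I -4 2 m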
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
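# Space group 122: I -4 2 d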
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
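# Space group 123: P 4/m m m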
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
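# Space group 124: P 4/m c c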
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
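# Space group 125: P 4/n b m (":2" marks origin choice 2)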
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
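# Space group 126: P 4/n n c (origin choice 2)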
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
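# Space group 127: P 4/m b m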
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
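# Space group 128: P 4/m n c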
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
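# Space group 129: P 4/n m m (origin choice 2)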
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
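# Space group 130: P 4/n c c (origin choice 2)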
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
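# Space group 131: P 42/m m c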
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
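# Space group 132: P 42/m c m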
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
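# Space group 133: P 42/n b c :2 (the ':2' suffix denotes origin choice 2
# of the International Tables)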
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
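# Space group 134: P 42/n n m :2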
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
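# Space group 135: P 42/m b c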
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
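# Space group 136: P 42/m n m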
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
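# Space group 137: P 42/n m c :2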
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
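# Space group 138: P 42/n c m :2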
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
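# Space group 139: I 4/m m m (body-centred: the second half of the list
# repeats the first sixteen operations with the centering translation
# (1/2,1/2,1/2) added)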
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
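# Space group 140: I 4/m c m
# (like every entry, retrievable by either key: space_groups[140] or
# space_groups['I 4/m c m'] return the same SpaceGroup object)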
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
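# Space group 141: I 41/a m d :2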
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
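# Space group 142: I 41/a c d :2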
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
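# Space group 143: P 3 (first of the trigonal groups; from here on the
# threefold rotations are given in their hexagonal-axes matrix form)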
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
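# Space group 144: P 31 (screw axis: the two threefold rotations carry
# translations of 1/3 and 2/3 along c)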
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
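# Space group 145: P 32 (enantiomorphic partner of P 31; the c translations
# of the two threefold rotations are swapped)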
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
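# Space group 146: R 3 :H (rhombohedral group in the hexagonal-axes setting
# ':H'; the R-centering translations (1/3,2/3,2/3) and (2/3,1/3,1/3) triple
# the operation list)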
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
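# Space group 147: P -3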
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
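# Space group 148: R -3 :H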
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
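# Space group 149: P 3 1 2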
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
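# Space group 150: P 3 2 1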
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
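# Space group 151: P 31 1 2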
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
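# Space group 152: P 31 2 1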
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
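# Space group 153: P 32 1 2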
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
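# Space group 154: P 32 2 1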
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
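# Space group 155: R 3 2 :H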
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
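# Space group 156: P 3 m 1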
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
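# Space group 157: P 3 1 m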
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
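# Space group 158: P 3 c 1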
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
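# Space group 159: P 3 1 c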
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
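# Space group 160: R 3 m :H (hexagonal-axes setting)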
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
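# Space group 161: R 3 c :H (hexagonal-axes setting)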
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
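# Space group 162: P -3 1 m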
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
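# Space group 163: P -3 1 c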
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
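# Space group 164: P -3 m 1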
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
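# Space group 165: P -3 c 1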
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
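# Space group 166: R -3 m :H (hexagonal-axes setting)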
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
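# Space group 167: R -3 c :H (hexagonal-axes setting)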
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
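# Space group 168: P 6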
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
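# Space group 169: P 61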
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
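# Space group 170: P 65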
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
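# Space group 171: P 62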
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
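# Space group 172: P 64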
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
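# Space group 173: P 63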
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
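# Space group 174: P -6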
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
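# Space group 175: P 6/m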
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
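# Space group 176: P 63/m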
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
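# Space group 177: P 6 2 2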
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
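# Space group 178: P 61 2 2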
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
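# Space group 179: P 65 2 2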
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
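# Space group 180: P 62 2 2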
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
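# Space group 181: P 64 2 2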
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
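# Space group 182: P 63 2 2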
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
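# Space group 183: P 6 m m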
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
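# Space group 184: P 6 c c (hexagonal).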
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
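# Space group 185: P 63 c m (hexagonal).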
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
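# Space group 186: P 63 m c (hexagonal).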
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
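# Space group 187: P -6 m 2 (hexagonal).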
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
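# Space group 188: P -6 c 2 (hexagonal).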
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
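# Space group 189: P -6 2 m (hexagonal).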
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
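# Space group 190: P -6 2 c (hexagonal).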
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
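# Space group 191: P 6/m m m (hexagonal holohedry, 24 operations).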
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
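# Space group 192: P 6/m c c (hexagonal, 24 operations).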
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
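# Space group 193: P 63/m c m (hexagonal, 24 operations).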
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
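# Space group 194: P 63/m m c (hexagonal, 24 operations).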
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
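# Space group 195: P 2 3 (first cubic group in the table).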
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
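# Space group 196: F 2 3 (cubic, face-centred: the 12 point operations are
# repeated with the (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) centring
# translations).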
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
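# Space group 197: I 2 3 (cubic, body-centred: the 12 point operations are
# repeated with the (1/2,1/2,1/2) centring translation).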
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
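# Space group 198: P 21 3 (cubic, with 21 screw axes).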
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
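# Space group 199: I 21 3 (cubic, body-centred; some translations appear
# un-reduced, e.g. (1/2, 1, 1/2), which is equivalent modulo a lattice vector).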
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
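# Space group 200: P m -3 (cubic, centrosymmetric: 24 operations).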
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
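# Space group 201 (P n -3 :2, origin choice 2): 24 operations; the n-glide
# and inversion-related operations carry half-cell translations.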
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
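# Space group 202 (F m -3): 96 operations, i.e. the 24 point-group operations
# combined with the four F-centring translations
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).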
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
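# Space group 203 (F d -3 :2, origin choice 2): 96 operations; the d-glide
# operations carry quarter-cell translations, repeated over the four
# F-centring translations.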
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
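# Space group 204 (I m -3): 48 operations, i.e. the 24 point-group operations
# combined with the I-centring translations (0,0,0) and (1/2,1/2,1/2).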
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
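# Space group 205 (P a -3): 24 operations; the a-glide operations carry
# half-cell translations.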
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
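# Space group 206 (I a -3): 48 operations: 24 operations with a-glide
# translations, repeated with the I-centring translation (1/2,1/2,1/2).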
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
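# Illustrative sketch (not part of the generated table): each stored triple
# (rot, trans_num, trans_den) encodes the affine map x' = rot @ x +
# trans_num / trans_den acting on fractional coordinates. The helper name is
# hypothetical and assumes "N" is this module's NumPy alias.
def _apply_symmetry_op(point, rot, trans_num, trans_den):
    point = N.asarray(point, dtype=float)
    # True division of the two integer arrays yields the fractional translation.
    return N.dot(rot, point) + trans_num / trans_den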
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
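# Illustrative sketch (hypothetical helper): the orbit of a fractional
# coordinate under a space group follows by applying every stored operation
# and wrapping each image back into the unit cell.
def _equivalent_positions(point, transformations):
    point = N.asarray(point, dtype=float)
    orbit = []
    for rot, trans_num, trans_den in transformations:
        image = N.dot(rot, point) + trans_num / trans_den
        orbit.append(image % 1.0)  # reduce into [0, 1)
    return orbit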
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
import os
import time
import logging
import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.integrate import solve_ivp, trapz, quad
from .utils import InvalidJumpError
from .utils import GRAV_ACC, EPS
from .utils import compute_dist_from_flat, vel2speed
if 'ONHEROKU' in os.environ:
plt = None
else:
import matplotlib.pyplot as plt
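# NOTE : The Heroku deployment presumably runs headless without matplotlib,
# so plt is left as None there and the plotting methods are unavailable.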
class Surface(object):
"""Base class for a 2D curve that represents the cross section of a surface
expressed in a standard Cartesian coordinate system."""
# If a user provides x,y data to create the surface that has any x spacings
# greater than this value, then the data will be interpolated before the
# slope and curvature derivatives are calculated.
max_x_spacing = 0.3 # meters
def __init__(self, x, y):
"""Instantiates an arbitrary 2D surface.
Parameters
==========
x : array_like, shape(n,)
The horizontal, x, coordinates of the slope. x[0] should be the
leftmost horizontal position and corresponds to the start of the
surface. This should be monotonically increasing and ideally have
no adjacent spacings greater than 0.3 meters.
y : array_like, shape(n,)
The vertical, y, coordinates of the slope. y[0] corresponds to the
start of the surface.
Warns
=====
x and y values that have any x spacings larger than 0.3 meters will be
resampled at x spacings of approximately 0.3 meters.
"""
self.x = np.asarray(x)
self.y = np.asarray(y)
self._initialize_surface()
def _initialize_surface(self):
self._check_monotonic()
self._check_x_spacing()
self._initialize_gradients()
self._initialize_interpolators()
def _check_x_spacing(self):
"""Resamples x and y at an approximately 0.3 linear spacing if any x
spacings are too large."""
if any(np.diff(self.x) > self.max_x_spacing):
msg = ('The x values have at least one spacing larger than '
'{:1.1f} meters and will be replaced with a finer x spacing '
'and the y values linearly interpolated at this new '
'spacing.')
logging.warning(msg.format(self.max_x_spacing))
# ensure spacing is less than max_x_spacing
total_x = self.x[-1] - self.x[0]
num = round(np.ceil(total_x / self.max_x_spacing)) + 1
x = np.linspace(self.x[0], self.x[-1], num=num)
kwargs = {'fill_value': 'extrapolate'}
interp_y = interp1d(self.x, self.y, **kwargs)
y = interp_y(x)
self.x = x
self.y = y
def _initialize_gradients(self):
self.slope = np.gradient(self.y, self.x, edge_order=2)
slope_deriv = np.gradient(self.slope, self.x, edge_order=2)
self.curvature = slope_deriv / (1 + self.slope**2)**1.5
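# NOTE : This is the signed curvature of y(x), kappa = y'' / (1 + y'**2)**1.5;
# positive values indicate a surface that is concave up.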
def _initialize_interpolators(self):
kwargs = {'fill_value': 'extrapolate'}
self.interp_y = interp1d(self.x, self.y, **kwargs)
self.interp_slope = interp1d(self.x, self.slope, **kwargs)
self.interp_curvature = interp1d(self.x, self.curvature, **kwargs)
def _check_monotonic(self):
# NOTE : Nudging by multiples of eps only has an effect for x values near 0.
eps = np.finfo(float).eps
count = 0
while any(np.diff(self.x) == 0):
idx = np.array(np.where(np.diff(self.x) == 0), dtype=np.int32)
self.x[idx+1] += 20*eps
count += 1
if count > 10:
msg = ('While loop ran for too long: epsilon error')
raise InvalidJumpError(msg)
if any(np.diff(self.x) < 0):
msg = ('x-coordinates are not monotonically increasing.')
raise InvalidJumpError(msg)
@property
def start(self):
"""Returns the x and y coordinates at the start point of the
surface."""
return self.x[0], self.y[0]
@property
def end(self):
"""Returns the x and y coordinates at the end point of the surface."""
return self.x[-1], self.y[-1]
def shift_coordinates(self, delx, dely):
"""Shifts the x and y coordinates by delx and dely respectively. This
modifies the surface in place."""
self.x += delx
self.y += dely
# NOTE : Only the interpolators have to be reinitialized, the gradients
# don't have to be computed again. For now, this method is here for
# consistency among *Surface classes.
self._initialize_surface()
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
Note
====
This general implementation can be slow, so implement overloaded
``distance_from()`` methods in subclasses when you can.
"""
def distance_squared(x):
return (xp - x)**2 + (yp - self.interp_y(x))**2
distances = np.sqrt((self.x - xp)**2 + (self.y - yp)**2)
x = fsolve(distance_squared, self.x[np.argmin(distances)])
return np.sign(yp - self.interp_y(x)) * np.sqrt(distance_squared(x))
def length(self):
"""Returns the length of the surface in meters via a numerical line
integral."""
def func(x):
return np.sqrt(1.0 + self.interp_slope(x)**2)
return quad(func, self.x[0], self.x[-1])[0]
def area_under(self, x_start=None, x_end=None, interval=0.05):
"""Returns the area under the curve integrating wrt to the x axis at
0.05 m intervals using the trapezoidal rule."""
if x_start is not None:
if x_start < self.start[0] or x_start > self.end[0]:
raise ValueError('x_start has to be between start and end.')
else:
x_start = self.start[0]
if x_end is not None:
if x_end < self.start[0] or x_end > self.end[0]:
raise ValueError('x_end has to be between start and end.')
else:
x_end = self.end[0]
x = np.linspace(x_start, x_end, num=int((x_end - x_start) / interval))
y = self.interp_y(x)
return trapz(y, x)
def height_above(self, surface):
"""Returns an array of values giving the height each point in this
surface is above the provided surface."""
return self.y - surface.interp_y(self.x)
def calculate_efh(self, takeoff_angle, takeoff_point, skier, increment=0.2):
"""Returns the equivalent fall height for the surface at the specified
constant intervals relative to the provided takeoff point or the start
of the surface.
Parameters
==========
takeoff_angle : float
Takeoff angle in radians.
takeoff_point : 2-tuple of floats
x and y coordinates of the point at which the skier leaves the
takeoff ramp.
skier : Skier
A skier instance.
increment : float, optional
x increment in meters between each calculated landing location.
Returns
=======
distance_x : ndarray, shape(n,)
Horizontal x locations of the equivalent fall height measures
spaced at the specified meter intervals relative to leftmost point
on the surface or the takeoff point, whichever is greater.
efh : ndarray, shape(n,)
The equivalent fall height corresponding to each value in
``distance_x``.
takeoff_speeds : ndarray, shape(n,)
The takeoff speed required to land the corresponding x coordinate.
"""
if abs(takeoff_angle) > np.pi/2:
msg = ('Takeoff angle must be between -pi/2 and pi/2.')
raise InvalidJumpError(msg)
if self.x[0] < takeoff_point[0] < self.x[-1]:
check_takeoff = self.interp_y(takeoff_point[0])
if takeoff_point[1] - check_takeoff < 0:
msg = ('Takeoff point cannot be under the surface.')
raise InvalidJumpError(msg)
elif self.end[0] <= takeoff_point[0]:
msg = ('Takeoff point cannot be downhill from surface.')
raise InvalidJumpError(msg)
# NOTE : If the takeoff point is before the start of the surface and below the
# height of the first surface point, the slope between the takeoff point
# and the left-most surface point must be less than the takeoff angle.
elif (takeoff_point[0] < self.start[0]):
slope = (self.start[1] - takeoff_point[1])/(self.start[0] - takeoff_point[0])
if takeoff_angle < np.arctan(slope):
msg = ('Takeoff angle does not allow impact on the surface '
'from above.')
raise InvalidJumpError(msg)
isGreaterTakeoff = self.x >= takeoff_point[0]
x = self.x[isGreaterTakeoff]
y = self.y[isGreaterTakeoff]
# NOTE : intervals are desired but the x distance is not necessarily
# divisible by the increment, so we drop the remainder so it is
# divisible and make the range inclusive.
remainder = (x[-1] - x[0]) % increment
rnge = (x[0], x[-1] - remainder)
num_points = int((x[-1] - x[0] - remainder) / increment) + 1
distance_x = np.linspace(*rnge, num=num_points)
slope = self.interp_slope(distance_x)
slope_angle = np.arctan(slope)
kwargs = {'fill_value': 'extrapolate'}
interp_y_efh = interp1d(x, y, **kwargs)
height_y = interp_y_efh(distance_x)
# NOTE : Create a surface under the surface that the skier will impact
# if they pass over the primary surface (self).
catch_surf = HorizontalSurface(np.min(height_y) - 0.1,
abs(distance_x[0] - distance_x[-1] + 2.0),
start=distance_x[-1] - 1.0)
efh = np.empty(len(distance_x))
efh[:] = np.nan
takeoff_speeds = np.full(len(distance_x), np.nan)
for i, (x, y, m) in enumerate(zip(distance_x, height_y, slope_angle)):
takeoff_speed, impact_vel = \
skier.speed_to_land_at((x, y), takeoff_point, takeoff_angle,
catch_surf)
# TODO: Use fly to check that it hits the x,y
impact_speed, impact_angle = vel2speed(*impact_vel)
# NOTE : The loop stops once the required takeoff speed exceeds 44 m/s
# (roughly 100 miles per hour); the remaining entries stay NaN.
if takeoff_speed > 44:
msg = ('Impact on the surface from above is only possible up to'
' {:.2f} meters. Calculation aborted.')
logging.warning(msg.format(x))
break
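# The equivalent fall height keeps only the velocity component normal to
# the local surface: efh = (v * sin(slope_angle - impact_angle))**2 / (2 * g).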
efh[i] = (impact_speed ** 2 * np.sin(m - impact_angle) ** 2 /
(2 * GRAV_ACC))
takeoff_speeds[i] = takeoff_speed
return distance_x, efh, takeoff_speeds
def plot(self, ax=None, **plot_kwargs):
"""Returns a matplotlib axes containing a plot of the surface.
Parameters
==========
ax : Axes
An existing matplotlib axes to plot to.
plot_kwargs : dict
Arguments to be passed to Axes.plot().
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.set_ylabel('Vertical Position [m]')
ax.set_xlabel('Horizontal Position [m]')
ax.plot(self.x, self.y, **plot_kwargs)
# TODO : These two lines probably only need to be set if ax is None.
ax.set_aspect('equal')
ax.grid()
return ax
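# Illustrative usage sketch, not part of the original module: constructing a
# Surface from sampled data and querying it. The sample slope and the helper
# name are hypothetical.
def _example_surface_usage():
    x = np.linspace(0.0, 10.0, num=50)
    y = -0.1 * x  # a straight 10 percent downgrade
    surf = Surface(x, y)
    # Positive distance: the point (5, 1) lies above the surface.
    d = surf.distance_from(5.0, 1.0)
    # Arc length of a straight run with slope -0.1 over 10 m of horizontal
    # distance is 10 * sqrt(1.01), roughly 10.05 m.
    ell = surf.length()
    return d, ell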
class HorizontalSurface(Surface):
def __init__(self, height, length, start=0.0, num_points=100):
"""Instantiates a class that represents a horizontal surface at a
height above the horizontal x axis.
Parameters
==========
height : float
The height of the surface above the horizontal x axis in meters.
length : float
The length of the surface in meters.
start : float, optional
The x location of the start of the left most point of the surface.
num_points : integer, optional
The number of (x,y) coordinates.
"""
x = np.linspace(start, start + length, num=num_points)
y = height * np.ones_like(x)
super(HorizontalSurface, self).__init__(x, y)
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
"""
return yp - self.y[0]
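# Illustrative sketch (hypothetical values): for a horizontal surface the
# signed distance reduces to a simple difference of heights.
def _example_horizontal_distance():
    surf = HorizontalSurface(2.0, 10.0)  # 10 m long surface at y = 2 m
    above = surf.distance_from(3.0, 5.0)  # returns 3.0 (point is above)
    below = surf.distance_from(3.0, 0.0)  # returns -2.0 (point is below)
    return above, below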
class FlatSurface(Surface):
"""Class that represents a flat surface angled relative to the
horizontal."""
def __init__(self, angle, length, init_pos=(0.0, 0.0), num_points=100):
"""Instantiates a flat surface that is oriented at a counterclockwise
angle from the horizontal.
Parameters
==========
angle : float
The angle of the surface in radians. Counterclockwise (about z) is
positive, clockwise is negative.
length : float
The distance in meters along the surface from the initial position.
init_pos : 2-tuple of floats, optional
The x and y coordinates in meters that locate the start of the
surface.
num_points : integer, optional
The number of points used to define the surface coordinates.
"""
if angle >= np.pi / 2.0 or angle <= -np.pi / 2.0:
raise InvalidJumpError('Angle must be between -90 and 90 degrees')
self._angle = angle
x = np.linspace(init_pos[0], init_pos[0] + length * np.cos(angle),
num=num_points)
y = np.linspace(init_pos[1], init_pos[1] + length * np.sin(angle),
num=num_points)
super(FlatSurface, self).__init__(x, y)
@property
def angle(self):
"""Returns the angle wrt to horizontal in radians of the surface."""
return self._angle
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
"""
if compute_dist_from_flat is None:
m = np.tan(self.angle)
d = (yp - m * xp) * np.cos(self.angle)
return d
else:
return compute_dist_from_flat(self.angle, xp, yp)
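# NOTE : A quick check of the analytic branch above: for a 45 degree
# surface through the origin, the point (xp, yp) = (0.0, 1.0) gives
# d = (1.0 - tan(45 deg) * 0.0) * cos(45 deg) ~= 0.707, the
# perpendicular distance from the point to the line y = x.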
class ClothoidCircleSurface(Surface):
"""Class that represents a surface made up of a circle bounded by two
clothoids."""
def __init__(self, entry_angle, exit_angle, entry_speed, tolerable_acc,
init_pos=(0.0, 0.0), gamma=0.99, num_points=200):
"""Instantiates a clothoid-circle-clothoid curve.
Parameters
==========
entry_angle : float
The entry angle tangent to the start of the left clothoid in
radians.
exit_angle : float
The exit angle tangent to the end of the right clothoid in radians.
entry_speed : float
The magnitude of the skier's velocity in meters per second as they
enter the left clothoid.
tolerable_acc : float
The tolerable normal acceleration of the skier in G's.
init_pos : 2-tuple of floats
The x and y coordinates of the start of the left clothoid.
gamma : float
Fraction of circular section.
num_points : integer, optional
The number of points in each of the three sections of the curve.
"""
self.entry_angle = entry_angle
self.exit_angle = exit_angle
self.entry_speed = entry_speed
self.tolerable_acc = tolerable_acc
self.init_pos = init_pos
self.gamma = gamma
self.num_points = num_points
X, Y = self._create_surface()
super(ClothoidCircleSurface, self).__init__(X, Y)
def _create_surface(self):
# TODO : Break this function into smaller functions.
lam = -self.entry_angle
beta = self.exit_angle
rotation_clothoid = (lam - beta) / 2
# used to rotate the symmetric clothoid so that the left side is at
# lam and the right side is at beta
# radius_min is the radius of the circular part of the transition.
# Every other radius of curvature (in the clothoids) will be longer
# than that, which ensures the g-force felt by the skier is always
# less than a desired value. This code ASSUMES that the velocity at the
# minimum radius is equal to the velocity at the end of the approach.
radius_min = self.entry_speed**2 / (self.tolerable_acc * GRAV_ACC)
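# For example, an entry speed of 10 m/s with a tolerable normal
# acceleration of 1.5 G gives radius_min = 100 / (1.5 * 9.81),
# roughly 6.8 m.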
# x,y data for circle
thetaCir = 0.5 * self.gamma * (lam + beta)
xCirBound = radius_min * np.sin(thetaCir)
xCirSt = -radius_min * np.sin(thetaCir)
xCir = np.linspace(xCirSt, xCirBound, num=self.num_points)
# x,y data for one clothoid
A_squared = radius_min**2 * (1 - self.gamma) * (lam + beta)
A = np.sqrt(A_squared)
clothoid_length = A * np.sqrt((1 - self.gamma) * (lam + beta))
# generates arc length points for one clothoid
s = np.linspace(clothoid_length, 0, num=self.num_points)
X1 = s - (s**5) / (40*A**4) + (s**9) / (3456*A**8)
Y1 = (s**3) / (6*A**2) - (s**7) / (336*A**6) + (s**11) / (42240*A**10)
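# NOTE : X1 and Y1 above are truncated Maclaurin series for the
# Fresnel integrals that define a clothoid of parameter A:
# X(s) = integral from 0 to s of cos(t**2 / (2*A**2)) dt and
# Y(s) = integral from 0 to s of sin(t**2 / (2*A**2)) dt.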
X2 = X1 - X1[0]
Y2 = Y1 - Y1[0]
theta = (lam + beta) / 2
X3 = np.cos(theta)*X2 + np.sin(theta)*Y2
Y3 = -np.sin(theta)*X2 + np.cos(theta)*Y2
X4 = X3
Y4 = Y3
X5 = -X4 + 2*X4[0]
Y5 = Y4
X4 = X4 - radius_min*np.sin(thetaCir)
Y4 = Y4 + radius_min*(1 - np.cos(thetaCir))
X4 = X4[::-1]
Y4 = Y4[::-1]
X5 = X5 + radius_min*np.sin(thetaCir)
Y5 = Y5 + radius_min*(1 - np.cos(thetaCir))
# stitching together clothoid and circular data
xLCir = xCir[xCir <= 0]
yLCir = radius_min - np.sqrt(radius_min**2 - xLCir**2)
xRCir = xCir[xCir >= 0]
yRCir = radius_min - np.sqrt(radius_min**2 - xRCir**2)
X4 = np.hstack((X4, xLCir[1:-1]))
Y4 = np.hstack((Y4, yLCir[1:-1]))
X5 = np.hstack((xRCir[0:-2], X5))
Y5 = np.hstack((yRCir[0:-2], Y5))
X6 = np.cos(rotation_clothoid)*X4 + np.sin(rotation_clothoid)*Y4
Y6 = -np.sin(rotation_clothoid)*X4 + np.cos(rotation_clothoid)*Y4
X7 = np.cos(rotation_clothoid)*X5 + np.sin(rotation_clothoid)*Y5
Y7 = -np.sin(rotation_clothoid)*X5 + np.cos(rotation_clothoid)*Y5
X = np.hstack((X6, X7))
Y = np.hstack((Y6, Y7))
# Shift the entry point of the curve to be at X=0, Y=0.
X -= np.min(X)
Y -= Y[np.argmin(X)]
# Shift the entry point of the curve to be at the end of the flat
# surface.
X += self.init_pos[0]
Y += self.init_pos[1]
return X, Y
class TakeoffSurface(Surface):
"""Class that represents a surface made up of a circle bounded by two
clothoids with a flat exit surface."""
def __init__(self, skier, entry_angle, exit_angle, entry_speed,
time_on_ramp=0.25, gamma=0.99, init_pos=(0.0, 0.0),
num_points=200):
"""Instantiates the takeoff curve with the flat takeoff ramp added to
the terminus of the clothoid-circle-clothoid curve.
Parameters
==========
skier : Skier
A skier instance.
entry_angle : float
The entry angle tangent to the start of the left clothoid in
radians.
exit_angle : float
The exit angle tangent to the end of the right clothoid in radians.
entry_speed : float
The magnitude of the skier's velocity in meters per second as they
enter the left clothoid.
time_on_ramp : float, optional
The time in seconds that the skier should be on the takeoff ramp
before launch.
gamma : float, optional
Fraction of circular section.
init_pos : 2-tuple of floats, optional
The x and y coordinates of the start of the left clothoid.
num_points : integer, optional
The number of points in each of the three sections of the curve.
"""
self.skier = skier
self.entry_angle = entry_angle
self.exit_angle = exit_angle
self.entry_speed = entry_speed
self.time_on_ramp = time_on_ramp
self.gamma = gamma
self.init_pos = init_pos
self.num_points = num_points
clt_cir_clt = ClothoidCircleSurface(entry_angle, exit_angle,
entry_speed,
skier.tolerable_sliding_acc,
init_pos=init_pos, gamma=gamma,
num_points=num_points)
ramp_entry_speed = skier.end_speed_on(clt_cir_clt,
init_speed=self.entry_speed)
ramp_len = time_on_ramp * ramp_entry_speed # meters
start_x = clt_cir_clt.x[-1]
start_y = clt_cir_clt.y[-1]
points_per_meter = len(clt_cir_clt.x) / (start_x - clt_cir_clt.x[0])
stop_x = start_x + ramp_len * np.cos(clt_cir_clt.exit_angle)
ramp_x = np.linspace(start_x, stop_x,
num=int(points_per_meter * (stop_x - start_x)))
stop_y = start_y + ramp_len * np.sin(clt_cir_clt.exit_angle)
ramp_y = np.linspace(start_y, stop_y, num=len(ramp_x))
ext_takeoff_curve_x = np.hstack((clt_cir_clt.x[:-1], ramp_x))
ext_takeoff_curve_y = np.hstack((clt_cir_clt.y[:-1], ramp_y))
super(TakeoffSurface, self).__init__(ext_takeoff_curve_x,
ext_takeoff_curve_y)
class LandingTransitionSurface(Surface):
"""Class representing a acceleration limited exponential curve that
transitions the skier from the landing surface to the parent slope."""
acc_error_tolerance = 0.001
max_iterations = 1000
delta = 0.01 # used for central difference approximation
def __init__(self, parent_surface, flight_traj, fall_height, tolerable_acc,
num_points=100):
"""Instantiates an exponentially decaying surface that connects the
landing surface to the parent slope.
Parameters
==========
parent_surface : FlatSurface
The parent slope that the landing transition should be tangent to
on exit.
flight_traj : Trajectory
The flight trajectory from the takeoff point to the parent slope.
fall_height : float
The desired equivalent fall height for the jump design in meters.
tolerable_acc : float
The maximum normal acceleration the skier should experience in the
landing.
num_points : integer
The number of points in the surface.
"""
if fall_height <= 0.0:
raise InvalidJumpError('Fall height must be greater than zero.')
self.fall_height = fall_height
self.parent_surface = parent_surface
self.flight_traj = flight_traj
self.tolerable_acc = tolerable_acc
trans_x, char_dist = self.find_transition_point()
x, y = self._create_trans_curve(trans_x, char_dist, num_points)
super(LandingTransitionSurface, self).__init__(x, y)
@property
def allowable_impact_speed(self):
"""Returns the perpendicular speed one would reach if dropped from the
provided fall height."""
return np.sqrt(2 * GRAV_ACC * self.fall_height)
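# NOTE : This follows from v**2 = 2 * g * h; for example, a fall
# height of 1.0 m gives sqrt(2 * 9.81 * 1.0), roughly 4.4 m/s.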
def calc_trans_acc(self, x):
"""Returns the acceleration in G's the skier feels at the exit
transition occurring if the transition starts at the provided
horizontal location, x."""
# TODO : This code seems to be repeated some in the LandingSurface
# creation code.
# NOTE : "slope" means dy/dx here
flight_y, flight_speed, flight_angle = \
self.flight_traj.interp_wrt_x(x)[[2, 9, 8]]
# NOTE : Not sure if setting this to pi/2 when the flight speed is
# greater than the allowable impact speed is correct, but it
# prevents some arcsin RuntimeWarnings for invalid values.
ratio = self.allowable_impact_speed / flight_speed
if ratio > 1.0:
flight_rel_landing_angle = np.pi / 2
else:
flight_rel_landing_angle = np.arcsin(ratio)
landing_angle = flight_angle + flight_rel_landing_angle
landing_slope = np.tan(landing_angle) # y'E(x0)
parent_slope = self.parent_surface.interp_slope(x)
parent_rel_landing_slope = landing_slope - parent_slope
parent_y = self.parent_surface.interp_y(x)
height_above_parent = flight_y - parent_y # C in Mont's paper
# required exponential characteristic distance, using three
# characteristic distances for transition
char_dist = np.abs(height_above_parent / parent_rel_landing_slope)
ydoubleprime = height_above_parent / char_dist**2
curvature = np.abs(ydoubleprime / (1 + landing_slope**2)**1.5)
trans_acc = (curvature * flight_speed**2 + GRAV_ACC *
np.cos(landing_angle))
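# NOTE : trans_acc above is the centripetal term
# curvature * flight_speed**2 plus the component of gravity normal
# to the surface, GRAV_ACC * cos(landing_angle); dividing by
# GRAV_ACC reports the result in G's.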
return np.abs(trans_acc / GRAV_ACC), char_dist
def _find_dgdx(self, x):
x_plus = x + self.delta
x_minus = x - self.delta
acc_plus, _ = self.calc_trans_acc(x_plus)
acc_minus, _ = self.calc_trans_acc(x_minus)
return (acc_plus - acc_minus) / 2 / self.delta
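# NOTE : _find_dgdx above is a second-order central difference,
# dg/dx ~= (g(x + delta) - g(x - delta)) / (2 * delta), and serves
# as the derivative in the Newton iteration of
# find_transition_point() below.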
def find_transition_point(self):
"""Returns the horizontal position indicating the intersection of the
flight path with the beginning of the landing transition. This is the
last possible transition point that satisfies the allowable transition
acceleration, which by definition minimizes the transition snow
budget.
Notes
=====
This uses Newton's method to find an adequate point but may fail to do
so with some combinations of flight trajectories, parent slope
geometry, and allowable acceleration. A warning will be emitted if the
maximum number of iterations is reached in this search and the curve is
likely invalid.
"""
i = 0
g_error = np.inf
x, _ = self.find_parallel_traj_point()
xpara = float(x) # copy
while g_error > self.acc_error_tolerance:
transition_Gs, char_dist = self.calc_trans_acc(x)
g_error = abs(transition_Gs - self.tolerable_acc)
dx = -g_error / self._find_dgdx(x)
x += dx
if x >= self.flight_traj.pos[-1, 0]:
msg = ('No landing transition point was found, backing up to '
'last possible point.')
logging.info(msg)
x = self.flight_traj.pos[-1, 0] - 2 * self.delta
if i > self.max_iterations:
msg = 'Landing transition while loop ran more than {} times.'
logging.warning(msg.format(self.max_iterations))
break
else:
i += 1
logging.debug('{} iterations in the landing transition loop.'.format(i))
x -= dx # loop stops after dx is added, so take previous
msg = ("The maximum landing transition acceleration is {} G's and the "
"tolerable landing transition acceleration is {} G's.")
logging.info(msg.format(transition_Gs, self.tolerable_acc))
if x < xpara:
msg = 'Not able to find valid landing transition point.'
raise InvalidJumpError(msg)
return x, char_dist
def find_parallel_traj_point(self):
"""Returns the position of a point on the flight trajectory where its
tangent is parallel to the parent slope. This is used as a starting
guess for the start of the landing transition point."""
slope_angle = self.parent_surface.angle
flight_traj_slope = self.flight_traj.slope
# TODO : Seems like these two interpolations can be combined into a
# single interpolation call by adding the y coordinate to the following
# line.
xpara_interpolator = interp1d(flight_traj_slope,
self.flight_traj.pos[:, 0])
xpara = xpara_interpolator(np.tan(slope_angle))
ypara = self.flight_traj.interp_wrt_x(xpara)[2]
return xpara, ypara
def _create_trans_curve(self, trans_x, char_dist, num_points):
xTranOutEnd = trans_x + 3 * char_dist
xParent = np.linspace(trans_x, xTranOutEnd, num_points)
yParent0 = self.parent_surface.interp_y(trans_x)
yParent = (yParent0 + (xParent - trans_x) *
np.tan(self.parent_surface.angle))
xTranOut = np.linspace(trans_x, xTranOutEnd, num_points)
dy = (self.flight_traj.interp_wrt_x(trans_x)[2] -
self.parent_surface.interp_y(trans_x))
yTranOut = yParent + dy * np.exp(-1*(xTranOut - trans_x) / char_dist)
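# NOTE : The transition curve is y(x) = y_parent(x) +
# dy * exp(-(x - trans_x) / char_dist). After three characteristic
# distances the offset has decayed to exp(-3), about 5 percent,
# which is why xTranOutEnd stops the curve at trans_x + 3 * char_dist.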
return xTranOut, yTranOut
class LandingSurface(Surface):
"""Class that defines an equivalent fall height landing surface."""
def __init__(self, skier, takeoff_point, takeoff_angle, max_landing_point,
fall_height, surf):
"""Instantiates a surface that ensures impact velocity is equivalent to
that from a vertical fall.
Parameters
==========
skier : Skier
A skier instance.
takeoff_point : 2-tuple of floats
The point at which the skier leaves the takeoff ramp.
takeoff_angle : float
The takeoff angle in radians.
max_landing_point : 2-tuple of floats
The maximum x position that the landing surface will attain in
meters. In the standard design, this is the start of the landing
transition point.
fall_height : float
The desired equivalent fall height in meters. This should always be
greater than zero.
surf : Surface
A surface below the full flight trajectory, the parent slope is a
good choice. It is useful if the distance_from() method runs very
fast, as it is called a lot internally.
"""
if fall_height <= 0.0:
raise InvalidJumpError('Fall height must be greater than zero.')
self.skier = skier
self.takeoff_point = takeoff_point
self.takeoff_angle = takeoff_angle
self.max_landing_point = max_landing_point
self.fall_height = fall_height
self.surf = surf
x, y = self._create_safe_surface()
super(LandingSurface, self).__init__(x, y)
@property
def allowable_impact_speed(self):
"""Returns the perpendicular speed one would reach if dropped from the
provided fall height."""
# NOTE : This is used in the LandingTransitionSurface class too and is
# duplicate code. May need to be a simple function.
return np.sqrt(2 * GRAV_ACC * self.fall_height)
def _create_safe_surface(self):
"""Returns the x and y coordinates of the equivalent fall height
landing surface."""
def rhs(x, y):
"""Returns the slope of the safe surface that ensures the impact
speed is equivalent to the impact speed from the equivalent fall
height.
dy
-- = ...
dx
x : integrating through x instead of time
y : single state variable
equivalent to safe_surface.m
integrates from the impact location backwards
If the direction of the velocity vector, the magnitude at impact,
and the angle between v and the slope are all known, then we can
find out how the slope should be oriented.
"""
# NOTE : y is an array of length 1
y = y[0]
logging.debug('x = {}, y = {}'.format(x, y))
takeoff_speed, impact_vel = self.skier.speed_to_land_at(
(x, y), self.takeoff_point, self.takeoff_angle, self.surf)
if takeoff_speed > 0.0:
impact_speed, impact_angle = vel2speed(*impact_vel)
else: # else takeoff_speed == 0, what about < 0?
impact_speed = self.allowable_impact_speed
impact_angle = -np.pi / 2.0
speed_ratio = self.allowable_impact_speed / impact_speed
logging.debug('speed ratio = {}'.format(speed_ratio))
# beta is the allowed angle between slope and path at speed vImpact
if speed_ratio > 1.0:
beta = np.pi / 2.0 + EPS
else:
beta = np.arcsin(speed_ratio)
logging.debug('impact angle = {} deg'.format(
np.rad2deg(impact_angle)))
logging.debug('beta = {} deg'.format(np.rad2deg(beta)))
safe_surface_angle = beta + impact_angle
logging.debug('safe_surface_angle = {} deg'.format(
np.rad2deg(safe_surface_angle)))
import numpy as np
from libensemble.libE import libE
from generator import gen_random_sample
from simulator import sim_find_sine
from libensemble.tools import add_unique_random_streams
nworkers = 4
libE_specs = {'nworkers': nworkers, 'comms': 'local'}
gen_specs = {'gen_f': gen_random_sample, # Our generator function
'out': [('x', float, (1,))], # gen_f output (name, type, size)
'user': {
'lower': np.array([-3]),
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import time
import unittest
import numpy as np
from arch.api import eggroll
eggroll.init("123")
from federatedml.feature.binning import QuantileBinning
from federatedml.feature.instance import Instance
from federatedml.param.param import FeatureBinningParam
class TestQuantileBinning(unittest.TestCase):
def setUp(self):
# eggroll.init("123")
self.data_num = 1000
self.feature_num = 200
final_result = []
numpy_array = []
for i in range(self.data_num):
tmp = np.random.randn(self.feature_num)
inst = Instance(inst_id=i, features=tmp, label=0)
tmp_pair = (str(i), inst)
final_result.append(tmp_pair)
numpy_array.append(tmp)
table = eggroll.parallelize(final_result,
include_key=True,
partition=10)
self.table = table
self.numpy_table = np.array(numpy_array)
self.cols = [1]
def test_quantile_binning(self):
compress_thres = 10000
head_size = 5000
error = 0.01
bin_num = 10
bin_param = FeatureBinningParam(method='quantile', compress_thres=compress_thres, head_size=head_size,
error=error,
bin_num=bin_num)
quan_bin = QuantileBinning(bin_param)
split_points = quan_bin.binning(self.table, cols=self.cols)
for col_idx, col in enumerate(self.cols):
bin_percent = [i * (1.0 / bin_num) for i in range(1, bin_num)]
x = self.numpy_table[:, col]
x = sorted(x)
for bin_idx, percent in enumerate(bin_percent):
min_rank = int(math.floor(percent * self.data_num - self.data_num * error))
max_rank = int(math.ceil(percent * self.data_num + self.data_num * error))
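# NOTE : An epsilon-approximate quantile at fraction p may land
# anywhere between ranks N * (p - error) and N * (p + error) in
# the sorted data, hence the [min_rank, max_rank] window asserted
# below.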
if min_rank < 0:
min_rank = 0
if max_rank > len(x) - 1:
max_rank = len(x) - 1
try:
self.assertTrue(x[min_rank] <= split_points[col_idx][bin_idx] <= x[max_rank])
except AssertionError:
print(x[min_rank], x[max_rank], split_points[col_idx][bin_idx])
found_index = x.index(split_points[col_idx][bin_idx])
print("min_rank: {}, found_rank: {}, max_rank: {}".format(
min_rank, found_index, max_rank
))
self.assertTrue(x[min_rank] <= split_points[col_idx][bin_idx] <= x[max_rank])
def tearDown(self):
self.table.destroy()
class TestQuantileBinningSpeed(unittest.TestCase):
def setUp(self):
# eggroll.init("123")
self.data_num = 100000
self.feature_num = 200
final_result = []
numpy_array = []
for i in range(self.data_num):
tmp = np.random.randn(self.feature_num)
"""Tests for the atmos_flux_inversion package.
Includes tests using random data, analytic solutions, and checks that
different methods agree for simple problems.
"""
from __future__ import print_function, division
import fractions
import itertools
import operator
import os.path
import atexit
import pickle
import math
import sys
try:
from functools import reduce
except ImportError:
# reduce used to be a builtin
pass
import numpy as np
import numpy.linalg as np_la
import numpy.linalg as la
import numpy.testing as np_tst
import scipy.linalg
import scipy.sparse
import scipy.optimize
# Import from scipy.linalg if not using dask
from scipy.linalg import cholesky
from scipy.sparse.linalg.interface import LinearOperator, MatrixLinearOperator
import unittest2
import pyfftw
import pandas as pd
import xarray
try:
import sparse
HAVE_SPARSE = True
except ImportError:
HAVE_SPARSE = False
import atmos_flux_inversion.optimal_interpolation
import atmos_flux_inversion.correlations
import atmos_flux_inversion.covariances
import atmos_flux_inversion.variational
import atmos_flux_inversion.remapper
import atmos_flux_inversion.wrapper
import atmos_flux_inversion.linalg
import atmos_flux_inversion.noise
import atmos_flux_inversion.psas
import atmos_flux_inversion.util
from atmos_flux_inversion.linalg import tolinearoperator
if os.path.exists(".pyfftw.pickle"):
with open(".pyfftw.pickle", "rb") as wis_in:
WISDOM = pickle.load(wis_in)
if isinstance(WISDOM[0], str):
WISDOM = [wis.encode("ascii")
for wis in WISDOM]
pyfftw.import_wisdom(WISDOM)
del WISDOM, wis_in
def save_wisdom():
"""Save accumulated pyfftw wisdom.
Saves in hidden file in current directory.
Should help speed up subsequent test runs.
"""
with open(".pyfftw.pickle", "wb") as wis_out:
pickle.dump(pyfftw.export_wisdom(), wis_out, 2)
atexit.register(save_wisdom)
del save_wisdom
# If adding other inexact methods to the list tested, be sure to add
# those to the `if "var" in name or "psas" in name` and
# `if "psas" in name` tests as applicable.
ALL_METHODS = (
atmos_flux_inversion.optimal_interpolation.simple,
atmos_flux_inversion.optimal_interpolation.fold_common,
atmos_flux_inversion.optimal_interpolation.save_sum,
atmos_flux_inversion.optimal_interpolation.scipy_chol,
atmos_flux_inversion.variational.simple,
atmos_flux_inversion.variational.incremental,
atmos_flux_inversion.variational.incr_chol,
atmos_flux_inversion.psas.simple,
atmos_flux_inversion.psas.fold_common,
)
ITERATIVE_METHOD_START = 4
"""Where the iterative methods start in the above list.
Used to test failure modes for these solvers.
"""
PRECISE_DTYPE = np.float128
"""The dtype used to represent analytic results.
These are initialized as :class:`fractions.Fraction` then converted to
this dtype for the comparison.
"""
ITERATIVE_STATE_TOLERANCE = 1e-3
ITERATIVE_COVARIANCE_TOLERANCE = 1e-1
EXACT_TOLERANCE = 1e-7
DTYPE = np.float64
"""Default dtype for certain tests."""
def getname(method):
"""Descriptive name for the function.
A name combining the function name and module.
Parameters
----------
method: callable
Returns
-------
name: str
"""
module = method.__module__
group = module.split(".")[-1]
variant = method.__name__
return "{group:s} ({variant:s})".format(group=group,
variant=variant)
def expectFailureIf(condition):
"""Mark a test as XFAIL based on condition.
Wrapper to make :func:`unittest2.expectedFailure` conditional.
Parameters
----------
condition: bool
Returns
-------
decorator: func
"""
if condition:
return unittest2.expectedFailure
return lambda fun: fun
class TestInversionSimple(unittest2.TestCase):
"""Test inversions using simple cases."""
def test_scalar_equal_variance(self):
"""Test a direct measurement of a scalar state."""
bg = np.atleast_1d(2.)
bg_cov = np.atleast_2d(1.)
obs = np.atleast_1d(3.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, 2.5)
np_tst.assert_allclose(post_cov, .5)
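# NOTE : For a scalar direct measurement the update reduces to
# mean = (var_obs * bg + var_bg * obs) / (var_bg + var_obs) and
# variance = var_bg * var_obs / (var_bg + var_obs), so the equal
# unit variances here give (2 + 3) / 2 = 2.5 and 1 / 2 = 0.5.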
def test_scalar_unequal_variance(self):
"""Test assimilation of a direct measurement fo a scalar state.
Variances not equal.
"""
bg = np.atleast_1d(15.)
bg_cov = np.atleast_2d(2.)
obs = np.atleast_1d(14.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
with self.subTest(method=getname(method)):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post, PRECISE_DTYPE(14 + fractions.Fraction(1, 3)))
np_tst.assert_allclose(
post_cov, PRECISE_DTYPE(fractions.Fraction(2, 3)))
def test_multiple_priors(self):
"""Test doing multiple assimilations at once.
Simple test.
"""
bg = np.array([[2., 3.]])
bg_cov = np.atleast_2d(1.)
obs = np.array([[3., 4.]])
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS[:ITERATIVE_METHOD_START]:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, [[2.5, 3.5]])
np_tst.assert_allclose(post_cov, .5)
def test_homework_one(self):
"""Verify that this can reproduce the answers to HW1.
Make sure the answers here are within roundoff of the analytic
solutions.
"""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
# Setup for expected degradation of solutions
name = getname(method)
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
# Also tested above in scalar_unequal_variance
with self.subTest(problem=3):
state_college_index = 1
post, post_cov = method(
bg[state_college_index],
bg_cov[state_college_index, state_college_index],
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, state_college_index])
np_tst.assert_allclose(
post, np.asanyarray(14 + fractions.Fraction(1, 3),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
np_tst.assert_allclose(
post_cov, np.asanyarray(fractions.Fraction(2, 3),
dtype=PRECISE_DTYPE),
rtol=cov_rtol)
with self.subTest(problem=4):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(2, 3),
14 + fractions.Fraction(1, 3),
21 + fractions.Fraction(2, 3)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=5):
pittsburgh_index = 0
post, post_cov = method(
bg, bg_cov,
obs[pittsburgh_index],
obs_cov[pittsburgh_index, pittsburgh_index],
obs_op[pittsburgh_index, :])
np_tst.assert_allclose(
post,
np.asanyarray((18 + fractions.Fraction(2, 3),
15 + fractions.Fraction(1, 3),
22 + fractions.Fraction(1, 6)),
PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=7):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
4 * obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(5, 6),
14 + fractions.Fraction(2, 3),
21 + fractions.Fraction(5, 6)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=8):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
# background correlations make this problem not
# strictly linear, at least without doing
# sequential inversions. Have not verified by hand
np_tst.assert_allclose(
post, np.asanyarray(
(18 + fractions.Fraction(1, 2),
14 + fractions.Fraction(1, 2),
21 + fractions.Fraction(3, 4)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
def test_sequential_assimilations(self):
"""Make sure this follows Bayes' rule."""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
name = getname(method)
if "var" in name.lower() or "psas" in name.lower():
state_rtol = ITERATIVE_STATE_TOLERANCE
cov_rtol = ITERATIVE_COVARIANCE_TOLERANCE
else:
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
inter1, inter_cov1 = method(
bg, bg_cov, obs[0], obs_cov[0, 0],
obs_op[0, :])
post1, post_cov1 = method(
inter1, inter_cov1, obs[1], obs_cov[1, 1],
obs_op[1, :])
post2, post_cov2 = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post1, post2, rtol=state_rtol)
if "psas" in name.lower():
# The second covariance isn't positive definite (only one
# positive entry) and no entry of the two results agrees
# even in order of magnitude.
raise unittest2.SkipTest("Known Failure: PSAS Covariances")
np_tst.assert_allclose(
post_cov1, post_cov2, rtol=cov_rtol)
def test_iterative_failures(self):
"""Test failure modes of iterative solvers."""
bg_stds = np.logspace(-8, 1, 10)
bg_corr = scipy.linalg.toeplitz(
np.arange(1, .9, -.01))
bg_cov = np.diag(bg_stds).dot(bg_corr).dot(np.diag(bg_stds))
bg_vals = np.arange(10)
obs_op = np.eye(3, 10)
obs_vals = 10 - np.arange(3)
obs_cov = np.diag((10, 1e-3, 1e-6)) / 8
for method in ALL_METHODS[ITERATIVE_METHOD_START:]:
name = getname(method)
with self.subTest(method=name):
with self.assertRaises(
atmos_flux_inversion.ConvergenceError) as cxt_mgr:
method(bg_vals, bg_cov, obs_vals, obs_cov, obs_op)
conv_err = cxt_mgr.exception
self.assertTrue(hasattr(conv_err, "guess"))
self.assertTrue(hasattr(conv_err, "result"))
self.assertIsInstance(conv_err.result,
scipy.optimize.OptimizeResult)
self.assertTrue(hasattr(conv_err, "hess_inv"))
class TestGaussianNoise(unittest2.TestCase):
"""Test the properties of the gaussian noise."""
def test_ident_cov(self):
"""Test generation with identity as covariance."""
sample_shape = 3
cov = np.eye(sample_shape)
noise = atmos_flux_inversion.noise.gaussian_noise(cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros((sample_shape,)),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), cov,
rtol=1e-2, atol=1e-2)
def test_shape(self):
"""Make sure the returned shapes are correct."""
sample_shape = (3,)
sample_cov = np.eye(sample_shape[0])
for shape in ((), (6,), (2, 3)):
with self.subTest(shape=shape):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, shape)
self.assertEqual(res.shape, shape + sample_shape)
with self.subTest(shape=5):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, 5)
self.assertEqual(res.shape, (5,) + sample_shape)
with self.subTest(shape=None):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, None)
self.assertEqual(res.shape, sample_shape)
def test_operator(self):
"""Test that the code works with operator covariances."""
diagonal = (1, .5, .3, .2, .1)
sample_cov = atmos_flux_inversion.covariances.DiagonalOperator(
diagonal)
sample_shape = (len(diagonal),)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), np.diag(diagonal),
rtol=1e-2, atol=1e-2)
def test_kron_op(self):
"""Test that large kronecker operators don't break the handling."""
op1 = scipy.linalg.toeplitz(.6 ** np.arange(15))
diag = (1, .9, .8, .7, .6, .5, .4, .3, .2, .1)
op2 = atmos_flux_inversion.covariances.DiagonalOperator(diag)
combined = atmos_flux_inversion.util.kronecker_product(op1, op2)
noise = atmos_flux_inversion.noise.gaussian_noise(combined, int(1e5))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(combined.shape[0]),
rtol=1.1e-2, atol=1.1e-2)
np_tst.assert_allclose(np.cov(noise.T),
scipy.linalg.kron(op1, np.diag(diag)),
rtol=3e-2, atol=3e-2)
def test_off_diagonal(self):
"""Test that the code works with off-diagonal elements."""
sample_cov = scipy.linalg.toeplitz((1, .5, .25, .125))
sample_shape = (4,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_slow_decay(self):
"""Test that the code handles slowly-decaying covariances."""
sample_cov = scipy.linalg.toeplitz(.8 ** np.arange(10))
sample_shape = (10,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_fails(self):
"""Test that construction fails on invalid input."""
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.ones(10))
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.eye(3, 2))
class TestCorrelations(unittest2.TestCase):
"""Test the generation of correlation matrices."""
def test_far_correl(self):
"""Test the correlation between points far apart.
Should be zero.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e-8)
corr = corr_fun(1e8)
self.assertAlmostEqual(corr, 0)
def test_near_correl(self):
"""Test 2D correlation between near points.
Should be one.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e8)
corr = corr_fun(1e-8)
self.assertAlmostEqual(corr, 1)
def test_2d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (int(15), int(20))
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
# test positive definite
try:
chol_upper = cholesky(corr_mat)
except la.LinAlgError:
self.fail("corr_mat not positive definite")
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_2d_make_matrix(self):
"""Test make_matrix for 2D correlations.
Checks against original value.
This test is really slow.
"""
# 30x25 Gaussian 10 not close
test_nx = 30
test_ny = 20
test_points = test_ny * test_nx
# TODO: speed up
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 15):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, (test_ny, test_nx))
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index,
(test_ny, test_nx, test_ny, test_nx)
).reshape((test_points, test_points)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=1e-5, atol=1e-6)
# check if it actually is positive definite
cholesky(corr_mat)
def test_1d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (200,)
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
# This fails with a correlation length of 5
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
# test positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_1d_make_matrix(self):
"""Test make_matrix for 1D correlations.
Checks against original value.
"""
test_nt = 200
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 30):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun,
test_nt
)
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index, (test_nt, test_nt)
).reshape((test_nt, test_nt)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=2e-7, atol=5e-7
)
# check if it actually is positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_fft_correlation_structure(self):
"""Ensure the FFT-based operators satisfy conditions of correlation matrices.
Checks for symmetry and ones on the diagonal.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for test_shape in ((300,), (20, 30)):
test_size = int(np.prod(test_shape, dtype=int))
for dist in (1, 3, 10, 30):
for is_cyclic in (True, False):
corr_fun = corr_class(dist)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, is_cyclic))
# This is the fastest way to get column-major
# order from da.eye.
corr_mat = corr_op.dot(np.eye(test_size).T)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="symmetry"):
np_tst.assert_allclose(corr_mat, corr_mat.T,
rtol=1e-14, atol=1e-15)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="self-correlation"):
np_tst.assert_allclose(np.diag(corr_mat), 1)
def test_1d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec)[noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1.5e-3)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
elif ((corr_class is atmos_flux_inversion.correlations.
BalgovindCorrelation and
dist == 10)):
# This one distance is problematic
# Roughly 3% of the points disagree
# for the last half of the tests
# I have no idea why
raise unittest2.SkipTest(
"Balgovind(10) correlations weird")
np_tst.assert_allclose(
corr_op.solve(
test_vec)[noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=2e-3
)
def test_1d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec),
corr_mat.dot(test_vec),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_2d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
np_tst.assert_allclose(
corr_op.solve(
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
def test_2d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape),
corr_mat.dot(test_vec).reshape(test_shape),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_homogeneous_from_array_cyclic(self):
"""Make sure cyclic from_array can be roundtripped.
Also tests that odd state sizes work.
"""
test_size = 25
corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation
for dist in (1, 3, 5):
with self.subTest(dist=dist):
corr_fun = corr_class(dist)
corr_op1 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_size, True))
first_column = corr_op1.dot(np.eye(test_size, 1)[:, 0])
corr_op2 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(first_column))
np_tst.assert_allclose(
corr_op1.dot(np.eye(test_size)),
corr_op2.dot(np.eye(test_size)))
def test_kron_composition(self):
"""Test that `kron` works similar to composition of the domains."""
HomogeneousIsotropicCorrelation = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation
)
corr_class = atmos_flux_inversion.correlations.GaussianCorrelation
corr_fun = corr_class(5)
shape1 = (5,)
shape2 = (7,)
corr_op1 = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape1))
corr_op2 = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape2))
kron_corr = corr_op1.kron(corr_op2)
direct_corr = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape1 + shape2))
self.assertEqual(kron_corr.shape, direct_corr.shape)
self.assertEqual(kron_corr._underlying_shape,
direct_corr._underlying_shape)
np_tst.assert_allclose(kron_corr._corr_fourier,
direct_corr._corr_fourier)
np_tst.assert_allclose(kron_corr._fourier_near_zero,
direct_corr._fourier_near_zero)
def test_kron_results(self):
"""Test the Kronecker product implementation."""
HomogeneousIsotropicCorrelation = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation)
corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation
test_shapes = (20, 25, (5, 6))
distances = (3, 5,)
for dist1, shape1, dist2, shape2 in itertools.product(
distances, test_shapes, repeat=2):
with self.subTest(dist1=dist1, dist2=dist2):
corr_fun1 = corr_class(dist1)
corr_fun2 = corr_class(dist2)
corr_op1 = (
HomogeneousIsotropicCorrelation.
from_function(corr_fun1, shape1))
corr_op2 = (
HomogeneousIsotropicCorrelation.
from_function(corr_fun2, shape2))
size1 = np.prod(shape1)
size2 = np.prod(shape2)
corr_mat1 = corr_op1.dot(np.eye(size1))
corr_mat2 = corr_op2.dot(np.eye(size2))
full_corr1 = corr_op1.kron(corr_op2)
full_corr2 = scipy.linalg.kron(np.asarray(corr_mat1),
np.asarray(corr_mat2))
self.assertIsInstance(
corr_op1, HomogeneousIsotropicCorrelation)
test_vec = np.arange(size1 * size2)
np_tst.assert_allclose(
full_corr1.dot(test_vec),
full_corr2.dot(test_vec))
test_mat = np.eye(size1 * size2)
np_tst.assert_allclose(
full_corr1.dot(test_mat),
full_corr2.dot(test_mat))
def test_kron_delegate(self):
"""Test that kron delegates where appropriate."""
op1 = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array((1, .5, .25)))
mat2 = np.eye(5)
combined_op = op1.kron(mat2)
self.assertIsInstance(
combined_op,
atmos_flux_inversion.linalg.SchmidtKroneckerProduct
)
def test_sqrt_direct(self):
"""Test the square root in the most direct manner possible.
Checks whether matrices corresponding to sqrt.T@sqrt and the
original matrix are approximately equal.
"""
operator = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array((1, .5, .25, .125)))
sqrt = operator.sqrt()
sqrt_squared = sqrt.T.dot(sqrt)
mat = np.eye(4)
np_tst.assert_allclose(operator.dot(mat),
sqrt_squared.dot(mat))
def test_from_function_direct(self):
"""Directly test the output of from_function."""
corr_func = (atmos_flux_inversion.correlations.
ExponentialCorrelation(1 / np.log(2)))
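# NOTE : With a correlation length of 1 / ln(2) the exponential
# correlation exp(-d / length) becomes 2 ** -d, so the expected
# matrices below are Toeplitz matrices of 0.5 ** distance.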
from_function = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation.
from_function)
toeplitz = scipy.linalg.toeplitz
with self.subTest(is_cyclic=False, nd=1):
corr_op = from_function(corr_func, [10], False)
np_tst.assert_allclose(
corr_op.dot(np.eye(10)),
toeplitz(0.5 ** np.arange(10)))
with self.subTest(is_cyclic=False, nd=2):
corr_op = from_function(corr_func, [2, 3], False)
same_row = toeplitz(0.5 ** np.array([0, 1, 2]))
other_row = toeplitz(
0.5 ** np.array([1, np.sqrt(2), np.sqrt(5)]))
np_tst.assert_allclose(
corr_op.dot(np.eye(6)),
np.block([[same_row, other_row],
[other_row, same_row]]))
corr_op = from_function(corr_func, [4, 6], False)
same_row = toeplitz(0.5 ** np.arange(6))
next_row = toeplitz(
0.5 ** np.array([1, np.sqrt(2), np.sqrt(5),
np.sqrt(10), np.sqrt(17),
np.sqrt(26)]))
row_after_next = toeplitz(
0.5 ** np.array([2, np.sqrt(5), np.sqrt(8),
np.sqrt(13), np.sqrt(20),
np.sqrt(29)]))
two_rows_on = toeplitz(
0.5 ** np.array([3, np.sqrt(10), np.sqrt(13),
np.sqrt(18), 5, np.sqrt(34)]))
np_tst.assert_allclose(
corr_op.dot(np.eye(24)),
np.block([[same_row, next_row, row_after_next, two_rows_on],
[next_row, same_row, next_row, row_after_next],
[row_after_next, next_row, same_row, next_row],
[two_rows_on, row_after_next, next_row, same_row]]))
with self.subTest(is_cyclic=True, nd=1):
corr_op = from_function(corr_func, [10], True)
np_tst.assert_allclose(
corr_op.dot(np.eye(10)),
toeplitz(
0.5 ** np.array([0, 1, 2, 3, 4, 5, 4, 3, 2, 1])))
with self.subTest(is_cyclic=True, nd=2):
corr_op = from_function(corr_func, [4, 6])
same_row = toeplitz(
0.5 ** np.array([0, 1, 2, 3, 2, 1]))
next_row = toeplitz(
0.5 ** np.array([1, np.sqrt(2), np.sqrt(5),
np.sqrt(10), np.sqrt(5), np.sqrt(2)]))
row_after_next = toeplitz(
0.5 ** np.array([2, np.sqrt(5), np.sqrt(8),
np.sqrt(13), np.sqrt(8), np.sqrt(5)]))
np_tst.assert_allclose(
corr_op.dot(np.eye(24)),
np.block([[same_row, next_row, row_after_next, next_row],
[next_row, same_row, next_row, row_after_next],
[row_after_next, next_row, same_row, next_row],
[next_row, row_after_next, next_row, same_row]]))
def test_inv(self):
"""Test inverse matches linalg."""
corr_func = (atmos_flux_inversion.correlations.
ExponentialCorrelation(1 / np.log(2)))
from_function = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation.
from_function)
for test_shape in (10, 11, (3, 3), (4, 4)):
with self.subTest(test_shape=test_shape):
corr_op = from_function(corr_func, test_shape)
test_size = np.prod(test_shape)
ident = np.eye(test_size)
np_tst.assert_allclose(
corr_op.inv().dot(ident),
la.inv(corr_op.dot(ident)),
rtol=1e-5, atol=1e-5)
def test_acyclic_inv_fails(self):
"""Test inverse fails for acyclic correlations."""
corr_func = (atmos_flux_inversion.correlations.
ExponentialCorrelation(1 / np.log(2)))
from_function = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation.
from_function)
for test_shape in (10, 11, (3, 3), (4, 4)):
with self.subTest(test_shape=test_shape):
corr_op = from_function(corr_func, test_shape,
is_cyclic=False)
self.assertRaises(
NotImplementedError,
corr_op.inv)
def test_wrong_shape_fails(self):
"""Test that a vector of the wrong shape fails noisily."""
corr_func = (atmos_flux_inversion.correlations.
ExponentialCorrelation(2))
corr_op = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation.
from_function(corr_func, (3, 4)))
self.assertRaises(
ValueError,
corr_op.solve,
np.arange(5))
def test_cyclic_from_array(self):
"""Test from_array with assumed cyclic correlations."""
array = [1, .5, .25, .125, .0625, .125, .25, .5]
op = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(array))
mat = scipy.linalg.toeplitz(array)
np_tst.assert_allclose(op.dot(np.eye(*mat.shape)),
mat)
def test_acyclic_from_array(self):
"""Test from_array with correlations assumed acyclic."""
array = [1, .5, .25, .125, .0625, .03125]
op = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(array, False))
mat = scipy.linalg.toeplitz(array)
np_tst.assert_allclose(op.dot(np.eye(*mat.shape)),
mat)
@unittest2.skipUnless(HAVE_SPARSE, "sparse not installed")
def test_sparse(self):
"""Test HomogeneousIsotropicCorrelations work on sparse.COO."""
array = 2. ** -np.arange(6)
op = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(array, False))
mat = scipy.linalg.toeplitz(array)
np_tst.assert_allclose(op.dot(sparse.eye(*mat.shape)),
mat)
class TestSchmidtKroneckerProduct(unittest2.TestCase):
"""Test the Schmidt Kronecker product implementation for LinearOperators.
This class tests the implementation based on the Schmidt decomposition.
"""
def test_identity(self):
"""Test that the implementation works with identity matrices."""
test_sizes = (4, 5)
SchmidtKroneckerProduct = (
atmos_flux_inversion.linalg.SchmidtKroneckerProduct)
# I want to be sure either being smaller works.
# Even versus odd also causes problems occasionally
for size1, size2 in itertools.product(test_sizes, repeat=2):
with self.subTest(size1=size1, size2=size2):
mat1 = np.eye(size1)
mat2 = np.eye(size2)
full_mat = SchmidtKroneckerProduct(
mat1, mat2)
big_ident = np.eye(size1 * size2)
np_tst.assert_allclose(
full_mat.dot(big_ident),
big_ident)
def test_identical_submatrices(self):
"""Test whether the implementation will generate identical blocks."""
mat1 = np.ones((3, 3))
mat2 = ((1, .5, .25), (.5, 1, .5), (.25, .5, 1))
np_tst.assert_allclose(
atmos_flux_inversion.linalg.SchmidtKroneckerProduct(
mat1, mat2).dot(np.eye(9)),
np.tile(mat2, (3, 3)))
def test_constant_blocks(self):
"""Test whether the implementation will produce constant blocks."""
mat1 = ((1, .5, .25), (.5, 1, .5), (.25, .5, 1))
mat2 = np.ones((3, 3))
np_tst.assert_allclose(
atmos_flux_inversion.linalg.SchmidtKroneckerProduct(
mat1, mat2).dot(np.eye(9)),
np.repeat(np.repeat(mat1, 3, 0), 3, 1))
def test_entangled_state(self):
"""Test whether the implementation works with entangled states."""
sigmax = np.array(((0, 1), (1, 0)))
sigmaz = np.array(((1, 0), (0, -1)))
operator = atmos_flux_inversion.linalg.SchmidtKroneckerProduct(
sigmax, sigmaz)
matrix = scipy.linalg.kron(sigmax, sigmaz)
# (k01 - k10) / sqrt(2)
epr_state = (0, .7071, -.7071, 0)
np_tst.assert_allclose(
operator.dot(epr_state),
matrix.dot(epr_state))
def test_drop_small(self):
"""Test that the implementation properly drops small components."""
SchmidtKroneckerProduct = (
atmos_flux_inversion.linalg.SchmidtKroneckerProduct)
# I want to be sure either being smaller works.
# Even versus odd also causes problems occasionally
mat1 = np.eye(2)
mat2 = np.eye(3)
full_mat = SchmidtKroneckerProduct(
mat1, mat2)
test_vec = np.array([1, 0, 0,
0, 1e-15, 0])
np_tst.assert_allclose(
full_mat.dot(test_vec),
np.eye(6, 1)[:, 0])
def test_transpose(self):
"""Test that SchmidtKroneckerProduct can be transposed."""
mat1 = np.eye(2)
mat2 = np.eye(3)
op = atmos_flux_inversion.linalg.SchmidtKroneckerProduct(
mat1, mat2
)
op_transpose = op.T
np_tst.assert_allclose(
op_transpose.dot(np.eye(6)),
np.eye(6))
class TestYMKroneckerProduct(unittest2.TestCase):
"""Test the YM13 Kronecker product implementation for LinearOperators.
This tests the
:class:`~atmos_flux_inversion.linalg.DaskKroneckerProductOperator`
implementation based on the algorithm in Yadav and Michalak (2013)
"""
def test_identity(self):
"""Test that the implementation works with identity matrices."""
test_sizes = (4, 5)
DaskKroneckerProductOperator = (
atmos_flux_inversion.linalg.DaskKroneckerProductOperator)
# I want to be sure either being smaller works.
# Even versus odd also causes problems occasionally
for size1, size2 in itertools.product(test_sizes, repeat=2):
with self.subTest(size1=size1, size2=size2):
mat1 = np.eye(size1)
mat2 = np.eye(size2)
full_mat = DaskKroneckerProductOperator(
mat1, mat2)
big_ident = np.eye(size1 * size2)
np_tst.assert_allclose(
full_mat.dot(big_ident),
big_ident)
def test_identical_submatrices(self):
"""Test whether the implementation will generate identical blocks."""
mat1 = np.ones((3, 3))
mat2 = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1)))
np_tst.assert_allclose(
atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
mat1, mat2).dot(np.eye(9)),
np.tile(mat2, (3, 3)))
def test_constant_blocks(self):
"""Test whether the implementation will produce constant blocks."""
mat1 = np.array(((1, .5, .25), (.5, 1, .5), (.25, .5, 1)))
mat2 = np.ones((3, 3))
np_tst.assert_allclose(
atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
mat1, mat2).dot(np.eye(9)),
np.repeat(np.repeat(mat1, 3, 0), 3, 1))
def test_entangled_state(self):
"""Test whether the implementation works with entangled states."""
sigmax = np.array(((0, 1), (1, 0)))
sigmaz = np.array(((1, 0), (0, -1)))
operator = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
sigmax, sigmaz)
matrix = scipy.linalg.kron(sigmax, sigmaz)
# (k01 - k10) / sqrt(2)
epr_state = (0, .7071, -.7071, 0)
np_tst.assert_allclose(
operator.dot(epr_state),
matrix.dot(epr_state))
@unittest2.skipUnless(HAVE_SPARSE, "sparse not installed")
def test_sparse(self):
"""Test that DaskKroneckerProductOperator works on sparse.COO."""
sigmax = np.array(((0, 1), (1, 0)))
sigmaz = np.array(((1, 0), (0, -1)))
operator = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
sigmax, sigmaz)
matrix = scipy.linalg.kron(sigmax, sigmaz)
epr_state = np.array((0, .7071, -.7071, 0))
np_tst.assert_allclose(
operator.dot(sparse.COO(epr_state)),
matrix.dot(epr_state))
def test_transpose(self):
"""Test whether the transpose is properly implemented."""
mat1 = np.eye(3)
mat2 = atmos_flux_inversion.covariances.DiagonalOperator((1, 1))
mat3 = np.eye(4)
DaskKroneckerProductOperator = (
atmos_flux_inversion.linalg.DaskKroneckerProductOperator)
with self.subTest(check="symmetric"):
product = DaskKroneckerProductOperator(
mat1, mat2)
self.assertIs(product.T, product)
with self.subTest(check="asymmetric1"):
mat1[0, 1] = 1
product = DaskKroneckerProductOperator(
mat1, mat2)
transpose = product.T
self.assertIsNot(transpose, product)
np_tst.assert_allclose(transpose._operator1,
mat1.T)
with self.subTest(check="asymmetric2"):
product = DaskKroneckerProductOperator(
mat3, mat1)
transpose = product.T
self.assertIsNot(transpose, product)
self.assertIs(transpose._operator1, mat3)
np_tst.assert_allclose(transpose._operator2.A,
mat1.T)
with self.subTest(check="asymmetric3"):
product = DaskKroneckerProductOperator(
mat1, mat1)
transpose = product.T
np_tst.assert_allclose(transpose._operator1,
mat1.T)
np_tst.assert_allclose(transpose._operator2.A,
mat1.T)
with self.subTest(check="rectangular"):
product = DaskKroneckerProductOperator(
mat1[:2], mat3[:3])
transpose = product.T
np_tst.assert_allclose(transpose._operator1,
mat1[:2].T)
np_tst.assert_allclose(transpose._operator2.A,
mat3[:3].T)
def test_sqrt(self):
"""Test whether the sqrt method works as intended."""
matrix1 = np.eye(2)
matrix2 = atmos_flux_inversion.covariances.DiagonalOperator((1, 2, 3))
tester = np.eye(6)
product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
matrix1, matrix2)
sqrt = product.sqrt()
proposed = sqrt.T.dot(sqrt)
np_tst.assert_allclose(proposed.dot(tester), product.dot(tester))
# Should I check the submatrices or assume that's covered?
def test_quadratic_form(self):
"""Test whether quadratic_form returns the intended result."""
matrix1 = scipy.linalg.toeplitz((1., 1/3., 1/9., 1/27., 1/81.)) # noqa
matrix2 = scipy.linalg.toeplitz((1., .5, .25, .125, .0625, .03125))
product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
matrix1, matrix2)
tester = np.eye(product.shape[0])
dense_product = scipy.linalg.kron(matrix1, matrix2)
test_vec = np.arange(product.shape[0])
np_tst.assert_allclose(product.quadratic_form(tester),
dense_product)
np_tst.assert_allclose(product.quadratic_form(test_vec),
test_vec.dot(dense_product.dot(test_vec)))
test_op = atmos_flux_inversion.linalg.DiagonalOperator(test_vec)
self.assertRaises(
TypeError,
product.quadratic_form,
test_op)
self.assertRaises(
ValueError,
product.quadratic_form,
test_vec[:-1])
@unittest2.skipUnless(HAVE_SPARSE, "sparse not installed")
def test_quadratic_form_sparse(self):
"""Test that quadratic_form works on sparse.COO."""
matrix1 = scipy.linalg.toeplitz(3. ** -np.arange(4))
matrix2 = scipy.linalg.toeplitz(5. ** -np.arange(5))
product = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
matrix1, matrix2)
tester = sparse.eye(product.shape[0])
dense_product = scipy.linalg.kron(matrix1, matrix2)
np_tst.assert_allclose(product.quadratic_form(tester),
dense_product)
def test_matrix_linop(self):
"""Test that the implementation works with MatrixLinearOperator."""
test_sizes = (4, 5)
DaskKroneckerProductOperator = (
atmos_flux_inversion.linalg.DaskKroneckerProductOperator)
        # I want to be sure either matrix being smaller works.
        # Even versus odd sizes also cause problems occasionally.
for size1, size2 in itertools.product(test_sizes, repeat=2):
with self.subTest(size1=size1, size2=size2):
mat1 = tolinearoperator(np.eye(size1))
mat2 = np.eye(size2)
full_mat = DaskKroneckerProductOperator(
mat1, mat2)
big_ident = np.eye(size1 * size2)
np_tst.assert_allclose(
full_mat.dot(big_ident),
big_ident)
def test_fails_not_array(self):
"""Test for failure if the first operator is not an array.
        The implementation requires an array there, and it should fail
        quickly at construction rather than slowly at evaluation.
"""
mat1 = atmos_flux_inversion.linalg.DiagonalOperator(np.arange(10))
mat2 = np.eye(3)
self.assertRaises(
ValueError,
atmos_flux_inversion.linalg.DaskKroneckerProductOperator,
mat1, mat2)
def test_sqrt_fails(self):
"""Test that the square root fails for bad inputs.
Specifically, non-square arrays and asymmetric arrays.
"""
kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator
self.assertRaises(
ValueError,
kron_op(np.eye(3, 2), np.eye(3)).sqrt)
self.assertRaises(
ValueError,
kron_op(np.eye(3), np.eye(2, 3)).sqrt)
self.assertRaises(
ValueError,
kron_op(np.array([[1, 1], [0, 1]]), np.eye(3)).sqrt)
@unittest2.skipUnless(HAVE_SPARSE, "sparse not installed")
def test_sparse_first_argument(self):
"""Test sparse.COO in the first position."""
row = np.exp(-np.arange(20))
row[row < 0.005] = 0
matrix1 = scipy.linalg.toeplitz(row)
operator1 = sparse.COO(matrix1)
operator2 = sparse.eye(15)
kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
operator1, operator2)
kron_mat = scipy.linalg.kron(matrix1, operator2.todense())
np_tst.assert_allclose(
kron_op.dot(np.eye(kron_op.shape[0])),
kron_mat)
np_tst.assert_allclose(
kron_op.dot(sparse.eye(kron_op.shape[0])).todense(),
kron_mat)
@unittest2.skipUnless(HAVE_SPARSE, "sparse not installed")
@unittest2.expectedFailure
def test_sparse_kron_quadratic_form(self):
"""Test that quadratic form of all sparse works."""
row = np.exp(-np.arange(20))
row[row < 0.005] = 0
matrix1 = scipy.linalg.toeplitz(row)
        operator1 = sparse.COO(matrix1)
operator2 = sparse.eye(15)
kron_op = atmos_flux_inversion.linalg.DaskKroneckerProductOperator(
operator1, operator2)
kron_mat = scipy.linalg.kron(matrix1, operator2.todense())
np_tst.assert_allclose(
kron_op.quadratic_form(sparse.eye(kron_op.shape[0])).todense(),
kron_mat)
class TestUtilKroneckerProduct(unittest2.TestCase):
"""Test atmos_flux_inversion.util.kronecker_product."""
def test_delegation(self):
"""Test that it delegates to subclasses where appropriate."""
HomogeneousIsotropicCorrelation = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation)
corr_class = atmos_flux_inversion.correlations.GaussianCorrelation
corr_fun = corr_class(5)
op1 = HomogeneousIsotropicCorrelation.from_function(corr_fun, 15)
op2 = HomogeneousIsotropicCorrelation.from_function(corr_fun, 20)
combined_op = atmos_flux_inversion.util.kronecker_product(op1, op2)
proposed_result = HomogeneousIsotropicCorrelation.from_function(
corr_fun, (15, 20))
self.assertIsInstance(combined_op, HomogeneousIsotropicCorrelation)
self.assertSequenceEqual(combined_op.shape,
tuple(np.multiply(op1.shape, op2.shape)))
self.assertEqual(combined_op._underlying_shape,
proposed_result._underlying_shape)
np_tst.assert_allclose(combined_op._fourier_near_zero,
proposed_result._fourier_near_zero)
np_tst.assert_allclose(combined_op._corr_fourier,
proposed_result._corr_fourier,
rtol=1e-5, atol=1e-6)
def test_array_array(self):
"""Test array-array Kronecker product."""
mat1 = np.eye(2)
mat2 = np.eye(3)
combined_op = atmos_flux_inversion.util.kronecker_product(mat1, mat2)
self.assertIsInstance(combined_op, np.ndarray)
self.assertSequenceEqual(combined_op.shape,
tuple(np.multiply(mat1.shape, mat2.shape)))
np_tst.assert_allclose(combined_op, scipy.linalg.kron(mat1, mat2))
def test_large_array_array(self):
"""Test large array-array Kronecker products.
        At some point it becomes faster to use the Y&M Kronecker
        representation than the dense one.
"""
mat1 = np.eye(1 << 5)
mat2 = np.eye(1 << 6)
combined = atmos_flux_inversion.util.kronecker_product(mat1, mat2)
self.assertIsInstance(
combined, atmos_flux_inversion.linalg.DaskKroneckerProductOperator)
self.assertSequenceEqual(combined.shape,
tuple(np.multiply(mat1.shape, mat2.shape)))
def test_array_sparse(self):
"""Test array-sparse matrix Kronecker products."""
mat1 = np.eye(3)
mat2 = scipy.sparse.eye(10)
combined_op = atmos_flux_inversion.util.kronecker_product(mat1, mat2)
big_ident = np.eye(30)
self.assertIsInstance(
combined_op,
atmos_flux_inversion.linalg.DaskKroneckerProductOperator
)
self.assertSequenceEqual(combined_op.shape,
tuple(np.multiply(mat1.shape, mat2.shape)))
np_tst.assert_allclose(combined_op.dot(big_ident),
big_ident)
def test_linop_array(self):
"""Test linop-sparse Kronecker products."""
op1 = atmos_flux_inversion.linalg.DiagonalOperator(np.arange(15))
mat2 = np.eye(10)
combined_op = atmos_flux_inversion.util.kronecker_product(op1, mat2)
self.assertIsInstance(
combined_op,
atmos_flux_inversion.linalg.SchmidtKroneckerProduct
)
self.assertSequenceEqual(combined_op.shape,
tuple(np.multiply(op1.shape, mat2.shape)))
class TestUtilSchmidtDecomposition(unittest2.TestCase):
"""Test the Schimdt decomposition code in atmos_flux_inversion.linalg."""
def setUp(self):
"""Set up the test vectors."""
from scipy.linalg import kron
# The notation here is borrowed from quantum computation. I
# use the k prefix to indicate the vector has precisely one
# nonzero entry, a one. The digits following are the binary
# representation of the zero-based index of that one.
self.k0 = np.array((1, 0)).reshape(-1, 1)
self.k1 = np.array((0, 1)).reshape(-1, 1)
self.k00 = kron(self.k0, self.k0)
self.k01 = kron(self.k0, self.k1)
self.k10 = kron(self.k1, self.k0)
self.k11 = kron(self.k1, self.k1)
self.k000 = kron(self.k0, self.k00)
self.k001 = kron(self.k0, self.k01)
self.k010 = kron(self.k0, self.k10)
self.k011 = kron(self.k0, self.k11)
self.k100 = kron(self.k1, self.k00)
self.k101 = kron(self.k1, self.k01)
self.k110 = kron(self.k1, self.k10)
self.k111 = kron(self.k1, self.k11)
def test_simple_combinations(self):
"""Test many combinations of vectors."""
possibilities = (
self.k0, self.k1,
self.k00, self.k01, self.k10, self.k11)
for vec1, vec2 in itertools.product(possibilities, possibilities):
with self.subTest(vec1=vec1[:, 0], vec2=vec2[:, 0]):
composite_state = scipy.linalg.kron(vec1, vec2)
lambdas, vecs1, vecs2 = (
atmos_flux_inversion.linalg.schmidt_decomposition(
composite_state, vec1.shape[0], vec2.shape[0]))
np_tst.assert_allclose(np.nonzero(lambdas),
[[0]])
np_tst.assert_allclose(np.abs(vecs1[0]),
vec1[:, 0])
np_tst.assert_allclose(np.abs(vecs2[0]),
vec2[:, 0])
np_tst.assert_allclose(
lambdas[0] *
scipy.linalg.kron(
np.asarray(vecs1[:1].T),
np.asarray(vecs2[:1].T)),
composite_state)
    def test_composite_combination(self):
        """Test composite combinations."""
sqrt2 = math.sqrt(2)
rsqrt2 = 1 / sqrt2
# b00 = (k00 + k11) / sqrt2
# b01 = (k00 - k11) / sqrt2
# b10 = (k01 + k10) / sqrt2
# b11 = (k01 - k10) / sqrt2
composite_state = (
scipy.linalg.kron(self.k0, self.k00) +
scipy.linalg.kron(self.k1, self.k01)) / sqrt2
res_lambda, res_vec1, res_vec2 = (
atmos_flux_inversion.linalg.schmidt_decomposition(
composite_state, 2, 4))
self.assertEqual(res_vec1.shape, (2, 2))
self.assertEqual(res_vec2.shape, (2, 4))
np_tst.assert_allclose(res_lambda, (rsqrt2, rsqrt2))
np_tst.assert_allclose(
sum(lambd * scipy.linalg.kron(
np.asarray(vec1).reshape(-1, 1),
np.asarray(vec2).reshape(-1, 1))
for lambd, vec1, vec2 in zip(res_lambda, res_vec1, res_vec2)),
composite_state)
def test_epr_state(self):
"""Test that it correctly decomposes the EPR state."""
sqrt2o2 = math.sqrt(2) / 2
epr_state = (self.k01 - self.k10) * sqrt2o2
lambdas, vecs1, vecs2 = (
atmos_flux_inversion.linalg.schmidt_decomposition(
epr_state, 2, 2
)
)
lambdas = np.asarray(lambdas)
vecs1 = np.asarray(vecs1)
vecs2 = np.asarray(vecs2)
self.assertEqual(len(lambdas), 2)
# This will not recover the original decomposition
np_tst.assert_allclose(lambdas, (sqrt2o2, sqrt2o2))
self.assertAlmostEqual(np.prod(lambdas), .5)
for vec1, vec2 in zip(vecs1, vecs2):
if np.allclose(np.abs(vec1), self.k0[:, 0]):
sign = 1
else:
sign = -1
            np_tst.assert_allclose(vec1, sign * vec2[-1::-1])
from copy import copy
import mdtraj as md
import subprocess
import os
from urllib.request import urlretrieve
from tempfile import TemporaryDirectory
import numpy as np
from itertools import tee, count
import mdtraj.core.element as elem
from re import sub
import contextlib
import pathlib
from pathlib import Path
aa_tlc = [
'ala', 'arg', 'asn', 'asp', 'cys', 'glu', 'gln', 'gly', 'his',
'ile', 'leu', 'lys', 'met', 'phe', 'pro', 'ser', 'thr', 'trp',
'tyr', 'val'
]
aa_olc = [
'a', 'r', 'n', 'd', 'c', 'e', 'q', 'g', 'h',
'i', 'l', 'k', 'm', 'f', 'p', 's', 't', 'w',
'y', 'v'
]
aa_olc2tlc = {olc: tlc for olc, tlc in zip(aa_olc, aa_tlc)}
aa_tlc2olc = {tlc: olc for olc, tlc in zip(aa_olc, aa_tlc)}
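# Illustrative sanity checks of the code maps above (example values only):
assert aa_olc2tlc['g'] == 'gly'
assert aa_tlc2olc['trp'] == 'w'
assert len(aa_olc2tlc) == len(aa_tlc2olc) == 20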
def rand_rotation_matrices(num=1, deflection=1.0):
"""
Creates an array of random rotation matrices.
num: number of rotation matrices to generate
deflection: the magnitude of the rotation. For 0, no rotation; for 1,
    completely random rotation. Small deflection => small perturbation.
Adapted from
http://blog.lostinmyterminal.com/python/2015/05/12/random-rotation-matrix.html
"""
randnums = np.random.uniform(size=(3, num))
theta, phi, z = randnums
theta = theta * 2.0*deflection*np.pi # Rotation about the pole (Z).
phi = phi * 2.0*np.pi # For direction of pole deflection.
z = z * 2.0*deflection # For magnitude of pole deflection.
# Compute a vector V used for distributing points over the sphere
# via the reflection I - V Transpose(V). This formulation of V
# will guarantee that if x[1] and x[2] are uniformly distributed,
# the reflected points will be uniform on the sphere. Note that V
# has length sqrt(2) to eliminate the 2 in the Householder matrix.
r = np.sqrt(z)
    Vx, Vy, Vz = V = np.stack([
        np.sin(phi) * r,
        np.cos(phi) * r,
        np.sqrt(2.0 - z),
    ])
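    # A sketch of the rest of the construction, following the reference
    # cited above and vectorized over the num samples: rotate about the
    # z-axis by theta, then apply (V V^T - I), which combines the
    # Householder reflection through V's plane with the rotation.
    st, ct = np.sin(theta), np.cos(theta)
    R = np.zeros((num, 3, 3))
    R[:, 0, 0], R[:, 0, 1] = ct, st
    R[:, 1, 0], R[:, 1, 1] = -st, ct
    R[:, 2, 2] = 1.0
    VVT = np.einsum('in,jn->nij', V, V)  # per-sample outer product V V^T
    return (VVT - np.eye(3)) @ R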
#! /usr/bin/env python
import numpy as np
from landlab.components.flexure import Flexure
from landlab import RasterModelGrid
def get_random_load_locations(shape, n_loads):
    # flat indices into the grid; np.random.randint's upper bound is exclusive
    return np.random.randint(0, shape[0] * shape[1], n_loads)
def get_random_load_magnitudes(n_loads):
return | np.random.normal(1e3, 10e7, n_loads) | numpy.random.normal |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # track the elapsed time (three-second poll interval) before the
            # status check so it is always defined when the success branch runs
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to find
    the matching NetCDF files in the THREDDS catalog.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of catalog entries for the NetCDF files matching the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
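# Example (illustrative pattern): list_files(url, r'.*\.nc$') would keep only
# the NetCDF entries from the catalog page.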
def M2M_Data(nclist, variables):
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the fixed-length catalog prefix
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # convert the time record (seconds since 1900-01-01) to datetimes
    tmp = variables[0].data / 60 / 60 / 24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
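# Typical call sequence (a sketch; the dates and tag are placeholders, and
# M2M_URLs below supplies the uframe_dataset_name and var_list for a platform):
#   data = M2M_Call(uframe_dataset_name,
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, r'.*\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)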
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
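# Usage sketch for structtype (illustrative): indexing one slot past the
# current end appends a fresh var(), which is how M2M_URLs below builds
# var_list[0], var_list[1], ... strictly in order:
#   vl = structtype()
#   vl[0].name = 'time'   # first access creates the entry
#   vl[1].name = 'depth'  # the next index appends another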
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
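# VELPT: single-point velocity meter. Buoy and NSIF branches report eastward/
# northward/upward velocity plus heading, roll, pitch, temperature and pressure
# in the instrument's raw units (deci-degrees, 0.01degC, 0.001dbar).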
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
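# PCO2W: seawater pCO2 (uatm) with the sensor's thermistor temperature (degC).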
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
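# PHSEN: seawater pH (reported unitless) with thermistor temperature (degC).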
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
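# SPKIR: downwelling spectral irradiance vector (uW cm-2 nm-1).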
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
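# PRESF: seafloor pressure (tide measurement); absolute pressure (dbar) and
# seawater temperature (degC) from the MFN nodes.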
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
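# CTDBP: pumped CTD; temperature, practical salinity, density, pressure and
# conductivity for buoy, NSIF and MFN nodes.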
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
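# VEL3D: 3-D single-point turbulent velocity meter on the seafloor (MFN) nodes;
# turbulent velocity components (m/s) plus seawater pressure.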
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
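# VEL3DK: profiler-mounted 3-D velocity meter; velocity components plus attitude
# (heading/pitch/roll) and the profiler CTD pressure (int_ctd_pressure).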
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
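# PCO2A: air-sea pCO2; surface-seawater and atmospheric partial pressures (uatm)
# and the derived air-sea CO2 flux.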
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
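# PARAD: photosynthetically available radiation (PAR) on the profiler.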
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
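# OPTAA: optical absorption/attenuation spectrophotometer; only the time base
# is requested in these branches.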
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
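# NUTNR: nitrate concentration (raw and salinity-corrected, umol/L) from the
# SUNA sensor; note the stream name 'suna_dcl_recovered' is used even for the
# Telemetered method.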
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
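# MOPAK: 3-axis motion package (buoy accelerations); only the time base is
# requested in these branches.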
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
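# METBK: bulk meteorology package; sea surface temperature/conductivity/salinity,
# magnetically corrected wind components, barometric pressure, air temperature,
# humidity, long/shortwave irradiance, precipitation, derived minute flux
# products, surface currents and specific humidity.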
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
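# FLORT three-channel fluorometers: seawater scattering coefficient, chlorophyll-a, CDOM, total volume scattering, and optical backscatter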
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
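# FDCHP direct-covariance flux package: only the time coordinate is requested here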
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
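# DOSTA dissolved-oxygen optodes: NSIF streams carry corrected and estimated oxygen plus optode temperature; MFN streams carry corrected and optode oxygen only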
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
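# ADCP velocity profilers: bin depths, heading/pitch/roll, and eastward/northward/upward seawater velocities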
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
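# WAVSS surface-wave statistics: zero crossings, wave heights and periods (average, max, significant, H10/T10, mean, peak, Tp5, Hmo), and mean direction/spread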
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
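# VELPT single-point velocity meters: east/north/up velocity, heading/pitch/roll in deci-degrees, temperature, and pressure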
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# note: unlike the other BUOY VELPT entries, this one reads the RID16 (NSIF) sensor via the non-DCL 'velpt_ab_instrument_recovered' stream; the DCL variant is kept commented out for reference
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
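# PCO2W seawater pCO2 sensors: thermistor temperature and partial pressure of CO2 in seawater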
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
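# PHSEN seawater pH sensors: thermistor temperature and pH (unitless)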
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance (multispectral radiometer), NSIF recovered-host streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (tide measurements), MFN recovered-host streams
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD (conductivity, temperature, depth), recovered-host DCL streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point turbulent velocity meter, MFN recovered-host streams
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2 (CO2 partial pressure in surface seawater and air), buoy recovered-host streams
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption and attenuation meter (only the time stamp is mapped for these streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate (SUNA), raw and salinity-corrected concentrations, recovered-host streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
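#CTDBP (RecoveredInst/RecoveredWFP) - CTD data read back from instrument memory
#after recovery; note the ctdbp_*/ctdpf_* parameter names here, in place of the
#DCL parameter names used by the recovered-host streams above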
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
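#ADCP - acoustic Doppler current profiler, velocities in earth coordinates
#(recovered_inst adcp_velocity_earth streams)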
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
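#ZPLSC - bio-acoustic sonar; only the echogram time stamps are mapped here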
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
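#VELPT - single-point velocity meter; heading/pitch/roll are reported in
#deci-degrees, temperature and pressure in the instrument's raw scaled units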
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
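# VEL3D-K point velocity meter on the CE09OSPM wire-following profiler (recovered).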
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
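# VEL3D-CD turbulent point velocity meters on the seafloor (MFN) nodes (recovered).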
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
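# PRESF seafloor pressure (tide) recorders on the MFN nodes (recovered).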
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
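# PHSEN seawater pH sensors, first on the NSIF nodes and then on the MFN nodes (recovered).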
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
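# PCO2W seawater pCO2 sensors on the NSIF and MFN nodes (recovered).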
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
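# PARAD-K PAR sensor on the CE09OSPM profiler (recovered). The double underscore
# in 'parad_k__stc_imodem_instrument_recovered' matches the published stream name
# and is not a typo.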
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
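# NUTNR (SUNA) nitrate sensors on the NSIF nodes (recovered).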
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
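# FDCHP direct-covariance flux package on the CE02SHSM buoy; only the time base is defined here.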
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
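# FLORT fluorometer/optical-backscatter triplets on the buoys and the CE09OSPM profiler (recovered).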
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
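# Dissolved oxygen: DOFST-K on the CE09OSPM profiler (raw sensor output reported
# in Hz), then DOSTA readings taken from the CTDBP streams on the NSIF and MFN nodes.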
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
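# Bulk wave statistics derived from the ADCPT-M instruments on the MFN nodes (recovered).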
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
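# Cabled benthic experiment package (BEP) instruments, streamed live over the
# cabled array. Note the BEP DOSTA branches read oxygen from the same
# ctdbp_no_sample stream as the CTD branches. A sketch of how one branch
# resolves (hypothetical input values):
#   platform_name='CE02SHBP', node='BEP', instrument_class='CTD', method='Streamed'
#   -> uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'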
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below: CE01ISSP, CE06ISSP, and CE02SHSP profiles.
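# CSPP FLORT fluorometer/optical-backscatter profiles.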
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
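# CSPP DOSTA dissolved-oxygen profiles.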
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
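# CSPP CTD profiles.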
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
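# CSPP PARAD (photosynthetically active radiation) profiles.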
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
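# CSPP NUTNR nitrate profiles (RecoveredCSPP only in this block).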
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
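# CSPP SPKIR downwelling spectral irradiance profiles.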
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
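# CSPP VELPT point-velocity profiles.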
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
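# OPTAA (absorption spectrophotometer) CSPP streams: only time and the
# co-located CTD pressure are requested in these blocks; the spectral
# absorption/attenuation channels are not mapped in this chain.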
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
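# Shelf surface-piercing profilers (CE02SHSP Oregon Shelf, CE07SHSP
# Washington Shelf). Only RecoveredCSPP methods appear for these platforms
# in the blocks that follow.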
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
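# DOSTA (dissolved oxygen) shelf-profiler streams: dissolved oxygen in
# umol/kg, estimated and temperature-compensated oxygen concentrations in
# umol/L, optode temperature, and CTD pressure.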
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
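# CTDPF (profiling CTD) shelf streams: temperature, salinity, density,
# pressure, and conductivity.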
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
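# PARAD (photosynthetically available radiation) shelf streams: PAR output
# in umol photons m-2 s-1 plus CTD pressure.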
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
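# NUTNR (nitrate) shelf streams: salinity-corrected and uncorrected nitrate
# concentrations (umol/L) plus CTD pressure.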
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
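# SPKIR (spectral irradiance) shelf streams: the downwelling vector
# (uW cm-2 nm-1) plus CTD pressure.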
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
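# VELPT shelf streams: same variable set as the inshore VELPT blocks above.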
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
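# OPTAA shelf streams: time and CTD pressure only, mirroring the inshore
# OPTAA blocks above.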
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
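# CE05MOAS mobile assets: the coastal gliders (CEGL247 through CEGL386).
# Each glider gets one Telemetered and one RecoveredHost block per
# instrument class; the dataset path differs only in the glider serial
# number and the delivery method. Glider CTD (CTDGV) streams follow:
# temperature, salinity, density, pressure, conductivity, and the glider's
# lat/lon fix.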
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
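# Glider DOSTA (oxygen optode) streams: sci_oxy4_oxygen in umol/L,
# sci_abs_oxygen in umol/kg, the co-located CTD pressure, and the glider's
# lat/lon.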
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = | np.array([]) | numpy.array |
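# Editorial sketch (assumption, not from the source): the elif blocks above
# differ only in platform/stream identifiers, so the same configuration could
# be table-driven, e.g.
#     GLIDER_DOSTA_STREAMS = {
#         ('CEGL312', 'RecoveredHost'): 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered',
#     }
# with one shared block filling in the variable names, empty arrays, and units.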
"""Common code for pose-from-action"""
import numpy as np
def one_hot_cat(array, num_choices):
# converts an integer class vector to one-hot
rng = | np.arange(num_choices) | numpy.arange |
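# Hedged completion sketch: the row above is truncated at the masked numpy
# call, so the body below is an assumption about the intended one-hot logic,
# not the source's own completion.
def one_hot_cat_sketch(array, num_choices):
    rng = np.arange(num_choices)
    # broadcast-compare each integer label against the range 0..num_choices-1
    return (np.asarray(array).reshape(-1, 1) == rng).astype(np.float32)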
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 13 11:56:44 2022
@author: dleon
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from scipy.stats import norm,gamma,cauchy,invgauss,norminvgauss,\
geninvgauss,poisson
from ambit_stochastics.trawl import trawl
#from fit_distr_for_tests import fit_trawl_distribution_aux
#from ambit_stochastics import trawl
#trawl simulation tests
def check_values(trawl_,simulations_to_use=[1,2,-2,-1]):
if trawl_.nr_trawls > 5000:
values = trawl_.values[:,:500]
times = trawl_.tau * np.arange(1,500+1,1)
else:
values, times = trawl_.values, trawl_.tau * np.arange(1,trawl_.nr_trawls+1,1)
f,ax = plt.subplots(2,2,figsize = (24,20),sharex= True)
ax[0,0].plot(times, values[simulations_to_use[0]])
ax[0,1].plot(times, values[simulations_to_use[1]])
ax[1,0].plot(times, values[simulations_to_use[2]])
ax[1,1].plot(times, values[simulations_to_use[3]])
ax[1,0].set_xlabel('time')
ax[1,1].set_xlabel('time')
f.suptitle('Sample paths of the trawl process')
def check_acf(trawl_, simulation_to_use=0, lags=20):
"""plot_acf produces a horizontal line at y-0, can t figure out how to eliminate it from the plot"""
values = trawl_.values[simulation_to_use]
times = trawl_.tau * np.arange(1,trawl_.nr_trawls+1,1)
fig_acf, ax_acf = plt.subplots(1,1,figsize=(12,6))
plot_acf(values, lags = lags-1, ax=ax_acf, color = 'blue', label='empirical')
ax_acf.set_xlabel('lag')
x = np.arange(1,lags,1)
y = trawl_.theoretical_acf(np.arange(1,lags,1)*trawl_.tau)
ax_acf.scatter(x,y.values(),marker = "*", color = 'r',s = 300,alpha = 0.5,label='theoretical')
ax_acf.legend()
def check_trawl_slice(trawl_slice):
check_values(trawl_slice)
check_acf(trawl_slice,simulation_to_use = 1,lags=20)
check_acf(trawl_slice,simulation_to_use = 7,lags=20)
check_acf(trawl_slice,simulation_to_use = 12,lags=20)
check_acf(trawl_slice,simulation_to_use = -5,lags=20)
def check_trawl_gaussian_part(trawl_):
a = [norm.fit(data = trawl_.gaussian_values[simulation,:]) for simulation in range(trawl_.nr_simulations)]
total_area = quad(trawl_.trawl_function,a=-np.inf,b=0)[0]
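# Hedged note: for a Levy basis the mean of the trawl scales with the Lebesgue
# measure of the trawl set and the variance scales linearly with it too, so the
# fitted (mean, sd) pairs are rescaled by (area, sqrt(area)) on the next line.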
a = np.array(a) / np.array([total_area, total_area ** 0.5])
f,ax= plt.subplots(1,2,sharey=True, tight_layout=True)
ax[0].hist(a[:,0],density=True)
ax[0].set_title('inferred means and true value')
ax[1].hist(a[:,1],density=True)
ax[1].set_title('inferred sd and true value')
ax[0].axvline(x=trawl_.gaussian_part_params[0],color='r')
ax[1].axvline(x=trawl_.gaussian_part_params[1],color='r')
def check_trawl_jump_part_distribution(trawl_):
total_area = quad(trawl_.trawl_function,a=-np.inf,b=0)[0]
if trawl_.jump_part_name == 'gamma':
a = [gamma.fit(data = simulation,floc=0) for simulation in trawl_.jump_values]
a = np.array([[i[0],i[2]] for i in a]) #a, scale
a = a / | np.array([total_area,1]) | numpy.array |
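# Hedged continuation sketch (assumed, mirroring check_trawl_gaussian_part
# above; the attribute name jump_part_params is a guess):
#     f, ax = plt.subplots(1, 2, sharey=True, tight_layout=True)
#     ax[0].hist(a[:, 0], density=True)
#     ax[0].axvline(x=trawl_.jump_part_params[0], color='r')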
import numpy as np
import astropy.io.fits as fits
from amglib.imageutils import *
import tifffile as tiff
from tqdm.notebook import tqdm
def readImage(fname) :
ext = fname.split('.')[-1]
img = []
if ext == 'tif' :
img = tiff.imread(fname).astype('float32')
if ext == 'fits' :
img = fits.getdata(fname,ext=0).astype('float32')
return img
def readImages(fname, first, last, average='none', averageStack=False, stride=1, count=1, size=5):
tmp = readImage(fname.format(first))
img = | np.zeros([(last-first+1) // stride,tmp.shape[0],tmp.shape[1]],dtype='float32') | numpy.zeros |
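# Hedged continuation sketch (assumed; the row above is truncated at the
# masked numpy call): fill the pre-allocated stack and return it, e.g.
#     for i, idx in enumerate(tqdm(range(first, last + 1, stride))):
#         img[i] = readImage(fname.format(idx))
#     return img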
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
from torchvision import transforms
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
import sys
sys.path.insert(0, './lib/model/unit')
from model.unit.utils import get_config, pytorch03_to_pytorch04
from model.unit.trainer import MUNIT_Trainer, UNIT_Trainer
from model.unit.networks_test import VAEGenA, VAEGenB
import torchvision.utils as vutils
from PIL import Image
from copy import deepcopy
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet_dual import resnet
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--use_tfb', dest='use_tfboard',
help='whether use tensorboard',
action='store_true')
parser.add_argument('--config', default='./lib/model/unit/configs/unit_rgb2thermal_folder.yaml', type=str, help="net configuration")
parser.add_argument('--input', default=None, type=str, help="input image path")
parser.add_argument('--output_folder', default='.', type=str, help="output image path")
parser.add_argument('--checkpoint_unit', default='./lib/model/unit/models/rgb2thermal.pt', type=str, help="checkpoint of autoencoders")
parser.add_argument('--style', type=str, default='', help="style image path")
parser.add_argument('--a2b', type=int, default=0, help="1 for a2b and others for b2a")
parser.add_argument('--seed', type=int, default=10, help="random seed")
parser.add_argument('--num_style',type=int, default=10, help="number of styles to sample")
parser.add_argument('--synchronized', action='store_true', help="whether use synchronized style code or not")
parser.add_argument('--output_only', action='store_true', help="whether use synchronized style code or not")
parser.add_argument('--output_path', type=str, default='.', help="path for logs, checkpoints, and VGG model weight")
parser.add_argument('--trainer', type=str, default='UNIT', help="MUNIT|UNIT")
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
# def get_unit_models(opts):
# config = get_config(opts.config)
# opts.num_style = 1 if opts.style != '' else opts.num_style
# config['vgg_model_path'] = opts.output_path
# trainer = UNIT_Trainer(config)
# try:
# state_dict = torch.load(opts.checkpoint_unit)
# trainer.gen_a.load_state_dict(state_dict['a'])
# trainer.gen_b.load_state_dict(state_dict['b'])
# except:
# state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint_unit))
# trainer.gen_a.load_state_dict(state_dict['a'])
# trainer.gen_b.load_state_dict(state_dict['b'])
# trainer.cuda()
# trainer.eval()
# encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode # encode function
# style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode # encode function
# decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode # decode function
# return encode, decode
class Resize_GPU(nn.Module):
def __init__(self, h, w):
super(Resize_GPU, self).__init__()
self.op = nn.AdaptiveAvgPool2d((h,w))
def forward(self, x):
x = self.op(x)
return x
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
load_name_gen_a = os.path.join(input_dir,
'gen_a_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
load_name_gen_b = os.path.join(input_dir,
'gen_b_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initilize the network here.
if args.net in ['res101_unit_update', 'res101_unit_update_coco', 'res101_unit_update_coco_final']:
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
config = get_config(args.config)
gen_a = VAEGenA(config['input_dim_a'], config['gen'])
gen_b = VAEGenB(config['input_dim_b'], config['gen'])
checkpoint_a = torch.load(load_name_gen_a)
checkpoint_b = torch.load(load_name_gen_b)
gen_a.load_state_dict(checkpoint_a['model'])
gen_b.load_state_dict(checkpoint_b['model'])
gen_a = gen_a.cuda()
gen_b = gen_b.cuda()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter(f'logs/{cfg.EXP_DIR}_test/')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
im_shape = im_data.size()
nw_resize = Resize_GPU(im_shape[2], im_shape[3])
content, _ = gen_b(im_data)
outputs = gen_a(content)
im_data_1 = (outputs + 1) / 2.
im_data_1 = nw_resize(im_data_1)
lines = []
with open('/content/MMTOD/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt') as f:
lines = f.readlines()
#print(lines)
path = './flir2'
img = im_data_1.clone().detach()
tensor = img.cpu().numpy()
tensor2 = | np.squeeze(tensor, axis=0) | numpy.squeeze |
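# Hedged continuation sketch (assumed): drop the batch axis, reorder CHW to
# HWC, and write the translated image out, e.g.
#     tensor2 = np.transpose(tensor2, (1, 2, 0))
#     cv2.imwrite(os.path.join(path, lines[i].strip() + '.jpeg'), tensor2 * 255)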
import gym
from gym.spaces import Box, Dict
import string
from env.utils import *
from env.utils import compute_graph
import heft
class DAGEnv(gym.Env):
def __init__(self, n, node_types, window, env_type, noise=False):
if isinstance(node_types, int):
p = node_types
node_types = np.ones(p)
else:
p = len(node_types)
self.observation_space = Dict
self.action_space = "Graph"
self.noise = noise
self.time = 0
self.num_steps = 0
self.p = p
self.n = n
self.window = window
self.env_type = env_type
if self.env_type == 'LU':
self.max_duration_cpu = max(durations_cpu_lu)
self.max_duration_gpu = max(durations_gpu_lu)
self.task_data = ggen_denselu(self.n, self.noise)
elif self.env_type == 'QR':
self.max_duration_cpu = max(durations_cpu_qr)
self.max_duration_gpu = max(durations_gpu_qr)
self.task_data = ggen_QR(self.n, self.noise)
elif self.env_type == 'chol':
self.max_duration_cpu = max(durations_cpu)
self.max_duration_gpu = max(durations_gpu)
self.task_data = ggen_cholesky(self.n, self.noise)
else:
raise EnvironmentError('not implemented')
self.num_nodes = self.task_data.num_nodes
self.sum_task = torch.sum(self.task_data.x, dim=0)
self.norm_desc_features = self.task_data.add_features_descendant()[0] / self.sum_task
self.cluster = Cluster(node_types=node_types.astype(int), communication_cost=np.zeros((p, p)))
self.running = -1 * np.ones(p) # array of task number
self.running_task2proc = {}
self.ready_proc = np.zeros(p) # for each processor, the time where it becomes available
self.ready_tasks = []
self.processed = {}
self.compeur_task = 0
self.current_proc = 0
self.is_homogene = (np.mean(self.cluster.node_types) - 1) * np.mean(self.cluster.node_types) == 0
self.critic_path_duration = None
self.total_work_normalized = None
# self.task_to_CP = np.zeros(len(self.task_graph.task_list))
# compute heft
string_cluster = string.printable[:self.p]
dic_heft = {}
for edge in np.array(self.task_data.edge_index.t()):
dic_heft[edge[0]] = dic_heft.get(edge[0], ()) + (edge[1],)
def compcost(job, agent):
idx = string_cluster.find(agent)
duration = self.task_data.task_list[job].durations[self.cluster.node_types[idx]]
return duration
def commcost(ni, nj, A, B):
return 0
orders, jobson = heft.schedule(dic_heft, string_cluster, compcost, commcost)
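# The HEFT makespan extracted below is later used as the baseline that
# normalizes the terminal reward in step(): reward = (heft_time - time) / heft_time.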
try:
self.heft_time = orders[jobson[self.num_nodes - 1]][-1].end
except:
# ok if test
self.heft_time = max([v[-1] for v in orders.values() if len(v) > 0])
def reset(self):
# self.task_data = random_ggen_fifo(self.n, self.max_in, self.max_out, self.noise)
if self.env_type == 'LU':
self.task_data = ggen_denselu(self.n, self.noise)
elif self.env_type == 'QR':
self.task_data = ggen_QR(self.n, self.noise)
elif self.env_type == 'chol':
self.task_data = ggen_cholesky(self.n, self.noise)
else:
raise EnvironmentError('not implemented')
self.time = 0
self.num_steps = 0
self.running = -1 * np.ones(self.p).astype(int)
self.running_task2proc = {}
self.ready_proc = np.zeros(self.p)
# self.ready_tasks.append(0)
self.current_proc = 0
# compute initial doable tasks
new_ready_tasks = torch.arange(0, self.num_nodes)[torch.logical_not(isin(torch.arange(0, self.num_nodes), self.task_data.edge_index[1, :]))]
self.ready_tasks = new_ready_tasks.tolist()
self.processed = {}
self.compeur_task = 0
# if self.noise > 0:
# for i in range(self.task_data.num_nodes):
# self.task_data.task_list[i].durations[1] = self.task_data.task_list[i].duration_gpu + np.random.normal(0, self.noise)
return self._compute_state()
def step(self, action, render_before=False, render_after=False, enforce=True, speed=False):
"""
first implementation, with only [-1, 0, ..., T] actions
:param action: -1: does nothing. t: schedules t on the current available processor
:return: next_state, reward, done, info
"""
self.num_steps += 1
self._find_available_proc()
if action == -1 and enforce:
if len(self.running_task2proc) == 0:
# the agent does nothing but every proc is available: we enforce an arbitrary action
action = self.ready_tasks[0]
if action != -1:
self.compeur_task += 1
self._choose_task_processor(action, self.current_proc)
if render_before:
self.render()
done = self._go_to_next_action(action, enforce)
if render_after and not speed:
self.render()
reward = (self.heft_time - self.time)/self.heft_time if done else 0
info = {'episode': {'r': reward, 'length': self.num_steps, 'time': self.time}, 'bad_transition': False}
if speed:
return 0, reward, done, info
return self._compute_state(), reward, done, info
def _find_available_proc(self):
while (self.current_proc < self.p) and (self.running[self.current_proc] > -1):
self.current_proc += 1
if self.current_proc == self.p:
# no new proc available
self.current_proc = 0
self._forward_in_time()
while (self.current_proc < self.p) and (self.running[self.current_proc] > -1):
self.current_proc += 1
def _forward_in_time(self):
if len(self.ready_proc[self.ready_proc > self.time]) > 0:
min_time = np.min(self.ready_proc[self.ready_proc > self.time])
else:
min_time = 0
self.time = min_time
self.ready_proc[self.ready_proc < self.time] = self.time
tasks_finished = self.running[np.logical_and(self.ready_proc == self.time, self.running > -1)].copy()
self.running[self.ready_proc == self.time] = -1
for task in tasks_finished:
del self.running_task2proc[task]
# compute successors of finished tasks
mask = isin(self.task_data.edge_index[0], torch.tensor(tasks_finished))
list_succ = self.task_data.edge_index[1][mask]
list_succ = torch.unique(list_succ)
# remove nodes
self.task_data.remove_edges(tasks_finished)
# compute new available tasks
new_ready_tasks = list_succ[torch.logical_not(isin(list_succ, self.task_data.edge_index[1, :]))]
self.ready_tasks += new_ready_tasks.tolist()
self.current_proc = np.argmin(self.running)
def _go_to_next_action(self, previous_action, enforce=True):
has_just_passed = self.is_homogene and previous_action == -1 and enforce
if has_just_passed:
self._forward_in_time()
elif previous_action == -1:
self.current_proc += 1
while len(self.ready_tasks) == 0:
self._forward_in_time()
if self._isdone():
return True
self._find_available_proc()
return False
def _choose_task_processor(self, action, processor):
# assert action in self.ready_tasks
if action != -1:
self.ready_proc[processor] += self.task_data.task_list[action].durations[self.cluster.node_types[processor]]
self.ready_tasks.remove(action)
self.processed[self.task_data.task_list[action].barcode] = [processor, self.time]
self.running_task2proc[action] = processor
self.running[processor] = action
def _compute_state(self):
visible_graph, node_num = compute_sub_graph(self.task_data,
torch.tensor(np.concatenate((self.running[self.running > -1],
self.ready_tasks)), dtype=torch.long),
self.window)
visible_graph.x, ready = self._compute_embeddings(node_num)
return {'graph': visible_graph, 'node_num': node_num, 'ready': ready}
def _remaining_time(self, running_tasks):
return torch.tensor([self.ready_proc[self.running_task2proc[task.item()]] for task in running_tasks]) - self.time
def _isdone(self):
# return (self.task_data.edge_index.shape[-1] == 0) and (len(self.running_task2proc) == 0)
return (self.compeur_task == self.num_nodes and (len(self.running_task2proc) == 0))
def _compute_embeddings(self, tasks):
ready = isin(tasks, torch.tensor(self.ready_tasks)).float()
running = isin(tasks, torch.tensor(self.running[self.running > -1])).squeeze(-1)
remaining_time = torch.zeros(tasks.shape[0])
remaining_time[running] = self._remaining_time(tasks[running].squeeze(-1)).to(torch.float)
remaining_time = remaining_time.unsqueeze(-1)
n_succ = torch.sum((tasks == self.task_data.edge_index[0]).float(), dim=1).unsqueeze(-1)
n_pred = torch.sum((tasks == self.task_data.edge_index[1]).float(), dim=1).unsqueeze(-1)
task_num = self.task_data.task_list[tasks.squeeze(-1)]
if isinstance(task_num, Task):
task_type = torch.tensor([[4]])
else:
task_type = torch.tensor([task.type for task in task_num]).unsqueeze(-1)
num_classes = 4
one_hot_type = (task_type == torch.arange(num_classes).reshape(1, num_classes)).float()
# add other embeddings
descendant_features_norm = self.norm_desc_features[tasks].squeeze(1)
# CP below task
# cpl = torch.zeros(tasks.shape[0])
# for i, task in enumerate(tasks):
# if self.task_to_CP[task] == 0:
# cpl[i] = CPAndWorkBelow(self.task_graph.task_list[task], self.n, durations_gpu)[0] / self.critic_path_duration
# self.task_to_CP[task] = cpl[i]
# else:
# cpl[i] = self.task_to_CP[task]
# cpl = cpl.unsqueeze(-1)
# add node type
node_type = torch.ones(tasks.shape[0]) * self.cluster.node_types[self.current_proc]
node_type = node_type.unsqueeze((-1))
if sum(self.cluster.node_types == 1) == 0:
min_ready_gpu = torch.FloatTensor([1]).repeat(tasks.shape[0]).unsqueeze((-1))
else:
min_ready_gpu = min(self.ready_proc[self.cluster.node_types == 1] - self.time)/self.max_duration_gpu
min_ready_gpu = torch.FloatTensor([min_ready_gpu]).repeat(tasks.shape[0]).unsqueeze((-1))
if sum(self.cluster.node_types == 0) == 0:
min_ready_cpu = torch.FloatTensor([1]).repeat(tasks.shape[0]).unsqueeze((-1))
else:
min_ready_cpu = min(self.ready_proc[self.cluster.node_types == 0] - self.time) / self.max_duration_cpu
min_ready_cpu = torch.FloatTensor([min_ready_cpu]).repeat(tasks.shape[0]).unsqueeze((-1))
# if self.current_proc > 3:
# print("what")
# return (torch.cat((n_succ/10, n_pred/10, one_hot_type, ready, running.unsqueeze(-1).float(), remaining_time/10, cpl), dim=1),
# ready)
# return (torch.cat((n_succ/10, n_pred/10, one_hot_type, ready, running.unsqueeze(-1).float(), remaining_time/10), dim=1),
# ready)
# return (torch.cat((n_succ, n_pred, one_hot_type, ready, running.unsqueeze(-1).float(), remaining_time, cpl), dim=1),
# ready)
return (torch.cat((n_succ, n_pred, one_hot_type, ready, running.unsqueeze(-1).float(), remaining_time,
descendant_features_norm, node_type, min_ready_gpu, min_ready_cpu), dim=1),
ready)
# return cpl, ready
# return (torch.cat((one_hot_type, ready, running.unsqueeze(-1).float(), remaining_time, cpl, node_type), dim=1),
# ready)
# # Compute HEFT
# def _compute_embeddings_heterogenous(self, tasks):
def render(self):
def color_task(task):
colors = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]
if task in self.running:
time_proportion = 1 - (self.ready_proc[self.running_task2proc[task]] - self.time)/\
self.task_data.task_list[task].duration_cpu
color_time = [1., time_proportion, time_proportion]
return color_time
elif task in self.ready_tasks:
return colors[1]
return colors[2]
def color_processor(processor):
if self.running[processor] == -1:
return [0, 1, 0] if self.current_proc == processor else [0.7, 0.7, 0.7]
else:
time_proportion = (self.ready_proc[processor] - self.time) / \
self.task_data.task_list[self.running[processor]].duration_cpu
return [time_proportion, 0, 0]
visible_graph, node_num = compute_sub_graph(self.task_data,
torch.tensor(np.concatenate((self.running[self.running > -1],
self.ready_tasks)), dtype=torch.long),
self.window)
plt.figure(figsize=(8 , 8))
plt.suptitle('time: {}'.format(self.time))
plt.subplot(121)
plt.box(on=None)
visible_graph.render(root=list(self.running[self.running > -1]))
# plt.title('time: {}'.format(self.time))
# plt.show()
plt.subplot(122)
plt.box(on=None)
graph = to_networkx(Data(visible_graph.x, visible_graph.edge_index.contiguous()))
pos = graphviz_layout(graph, prog='dot', root=None)
# pos = graphviz_layout(G, prog='tree')
node_color = [color_task(task[0].item()) for task in node_num]
# plt.figure(figsize=(8, 8))
nx.draw_networkx_nodes(graph, pos, node_color=node_color)
nx.draw_networkx_edges(graph, pos)
labels = {}
for i, task in enumerate(node_num):
if task[0].item() in self.ready_tasks:
labels[i] = task[0].item()
nx.draw_networkx_labels(graph, pos, labels, font_size=16)
# plt.title('time: {}'.format(self.time))
plt.show()
# Cluster
edges_list = [(u, v, {"cost": self.cluster.communication_cost[u, v]}) for u in range(self.p) for v in range(self.p) if u != v]
colors = [color_processor(p) for p in range(self.p)]
G = nx.Graph()
G.add_nodes_from(list(range(len(self.cluster.node_types))))
G.add_edges_from(edges_list)
pos = graphviz_layout(G)
node_labels = {}
for i, node_type in enumerate(self.cluster.node_types):
node_labels[i] = ["CPU", "GPU"][node_type]
plt.figure(figsize=(8, 8))
nx.draw_networkx_nodes(G, pos=pos, node_color=colors, node_size=1000)
nx.draw_networkx_edges(G, pos=pos)
nx.draw_networkx_edge_labels(G, pos=pos)
nx.draw_networkx_labels(G, pos, node_labels, font_size=16)
plt.show()
def visualize_schedule(self, figsize=(80, 30), fig_file=None, flip=False):
def get_data(env):
P = env.p
Processed = env.processed
for k, v in Processed.items():
Processed[k] = [int(v[0]), int(v[1])]
# makespan should be discrete and durations should be discretized
makespan = int(env.time)
data = np.ones((P, makespan)) * (-1)
data = data.astype(int)
compl_data = [[] for _ in range(P)]
for x, sched in Processed.items():
tasktype = x[0]
pr = sched[0]
s_time = sched[1]
e_time = s_time + Task(x).durations[env.cluster.node_types[pr]]
data[pr, s_time:e_time] = tasktype
if tasktype == 0:
compl_data[pr].insert(0, (x[1]))
elif tasktype == 1:
compl_data[pr].insert(0, (x[1], x[2]))
elif tasktype == 2:
compl_data[pr].insert(0, (x[1], x[2]))
else:
compl_data[pr].insert(0, (x[1], x[2], x[3]))
return data, compl_data
def avg(a, b):
return (a + b) / 2.0
P = self.p
data, compl_data = get_data(self)
if flip:
data = data[-1::-1, :]
compl_data = compl_data[-1::-1]
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.axes.get_yaxis().set_visible(False)
ax.set_aspect(1)
for y, row in enumerate(data):
# for x, col in enumerate(row):
x = 0
i = 0
indices_in_row = compl_data[y]
while x < len(row):
col = row[x]
if col != -1:
shift = Task([col]).durations[self.cluster.node_types[y]]
indices = indices_in_row[i]
else:
x = x + 1
continue
x1 = [x, x + shift]
y1 = np.array([y, y])
y2 = y1 + 1
if col == 0:
plt.fill_between(x1, y1, y2=y2, facecolor='green', edgecolor='Black')
plt.text(avg(x1[0], x1[1]), avg(y1[0], y2[0]), 'C({})'.format(indices),
horizontalalignment='center',
verticalalignment='center', fontsize=30)
if col == 1:
plt.fill_between(x1, y1, y2=y2, facecolor='red', edgecolor='Black')
plt.text(avg(x1[0], x1[1]), avg(y1[0], y2[0]), "S{}".format(indices),
horizontalalignment='center',
verticalalignment='center', fontsize=30)
if col == 2:
plt.fill_between(x1, y1, y2=y2, facecolor='orange', edgecolor='Black')
plt.text(avg(x1[0], x1[1]), avg(y1[0], y2[0]), "T{}".format(indices),
horizontalalignment='center',
verticalalignment='center', fontsize=30)
if col == 3:
plt.fill_between(x1, y1, y2=y2, facecolor='yellow', edgecolor='Black')
plt.text(avg(x1[0], x1[1]), avg(y1[0], y2[0]), "G{}".format(indices),
horizontalalignment='center',
verticalalignment='center', fontsize=30)
x = x + shift
i = i + 1
plt.ylim(P, 0)
plt.xlim(-1e-3, data.shape[1] + 1e-3)
plt.xticks(fontsize=50)
if fig_file != None:
plt.savefig(fig_file)
return
def export(self):
pass  # body missing in the source
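# Minimal usage sketch (editorial assumption, not from the source): the
# constructor arguments and the random policy below are illustrative only.
def _demo_random_rollout():
    env = DAGEnv(n=4, node_types=np.array([0, 1]), window=1, env_type='chol')
    env.reset()
    done = False
    info = {}
    while not done:
        # schedule an arbitrary ready task, or pass (-1) when none is ready
        action = env.ready_tasks[0] if env.ready_tasks else -1
        _, reward, done, info = env.step(action)
    return info['episode']['time']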
# legacy code
class CholeskyTaskGraph(gym.Env):
def __init__(self, n, node_types, window, noise=False):
if isinstance(node_types, int):
p = node_types
node_types = np.ones(p)
else:
p = len(node_types)
self.observation_space = Dict
self.action_space = "Graph"
self.noise = noise
self.time = 0
self.num_steps = 0
self.n = n
self.p = p
self.window = window
self.task_graph = compute_graph(n=n, noise=noise)
self.task_data = TaskGraph(self.task_graph.x.clone(), self.task_graph.edge_index.clone(), self.task_graph.task_list.copy())
# self.task_to_asap = {v: k for (k, v) in enumerate(self.task_data.task_list)}
self.cluster = Cluster(node_types=node_types.astype(int), communication_cost=np.zeros((p, p)))
self.running = -1 * np.ones(p) # array of task number
self.running_task2proc = {}
self.ready_proc = np.zeros(p) # for each processor, the time where it becomes available
self.ready_tasks = []
self.processed = {}
self.current_proc = 0
self.is_homogene = (np.mean(self.cluster.node_types) - 1) * np.mean(self.cluster.node_types) == 0
self.critic_path_duration = sum(durations_gpu[:-2]) * (self.n - 1) + durations_gpu[0] # 158
self.total_work_normalized = (n * durations_gpu[0] + n * (n - 1) / 2 * (durations_gpu[1] + durations_gpu[2]) + \
n * (n - 1) * (n - 2) / 6 * durations_gpu[3]) / p # 536 / p
self.task_to_CP = np.zeros(len(self.task_graph.task_list))
def reset(self):
self.task_data = TaskGraph(self.task_graph.x.clone(), self.task_graph.edge_index.clone(), self.task_graph.task_list.copy())
self.time = 0
self.num_steps = 0
self.running = -1 * np.ones(self.p).astype(int)
self.running_task2proc = {}
self.ready_proc = | np.zeros(self.p) | numpy.zeros |
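# Hedged continuation sketch (assumed, mirroring DAGEnv.reset above; the row
# is truncated at the masked numpy call):
#     self.ready_tasks = []
#     self.processed = {}
#     self.current_proc = 0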
# coding=utf-8
"""
Drivers for Monte Carlo sampling of chemical states, such as tautomers and protomers.
"""
import copy
import logging
import math
import random
from pandas import DataFrame
import pandas as pd
from pandas.testing import assert_frame_equal
import sys
import numpy as np
import os
from simtk import unit
from simtk import openmm as mm
from saltswap.swapper import Swapper
from .proposals import (
_StateProposal,
SaltSwapProposal,
UniformSwapProposal,
COOHDummyMover,
)
from .topology import Topology
from .saltswap_utils import update_fractional_stateVector
from .pka import available_pkas
from .ions import NeutralChargeRule, choose_neutralizing_ions_by_method
from simtk.openmm import app
from numbers import Number
import re
from .logger import log
from abc import ABCMeta, abstractmethod
from lxml import etree, objectify
from typing import Dict, List, Optional, Tuple, Any, Callable
from .integrators import GHMCIntegrator, GBAOABIntegrator
from enum import Enum
import itertools
from collections import defaultdict
kB = (1.0 * unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA).in_units_of(
unit.kilojoules_per_mole / unit.kelvin
)
np.set_printoptions(precision=15)
class SamplingMethod(Enum):
"""Enum for describing different sampling strategies."""
# Markov chain Monte Carlo sampling of states with a Metropolis-Hastings-like accept/reject test,
# including nonequilibrium candidate Monte Carlo (NCMC)
MCMC = 0
# Importance sampling, where only one state is sampled and the work is calculated to switch,
# includind annealed importance sampling
IMPORTANCE = 1
class _TitratableResidue:
"""Representation of a single residue with multiple titration states."""
def __init__(self):
"""
Instantiate a _TitratableResidue
Notes
-----
This class should not be instantiated directly. Use `from_lists` or `from_serialized_xml` instead.
This class is intended for internal use by the ProtonDrive.
"""
# The indices of the residue atoms in the system
self.atom_indices = list()
# List to store titration states
self.titration_states: List[_TitrationState] = list()
self.index = None
self.name = None
self.residue_type = None
# NonbondedForce exceptions associated with this titration state
self.exception_indices = list()
self._state = None
self._pka_data = None
self._residue_pka = None
return
def __eq__(self, other):
for own_state, other_state in zip(
self.titration_states, other.titration_states
):
if own_state != other_state:
return False
if self.name != other.name:
return False
if self.residue_type != other.residue_type:
return False
if self.index != other.index:
return False
if self._state != other._state:
return False
if self._pka_data is not None and other._pka_data is not None:
try:
assert_frame_equal(
self._pka_data, other._pka_data, check_less_precise=4
)
except AssertionError:
return False
if self._residue_pka != other._residue_pka:
return False
return True
@classmethod
def from_lists(
cls,
atom_indices,
group_index,
name,
residue_type,
exception_indices,
pka_data=None,
residue_pka=None,
):
"""
Instantiate a _TitratableResidue from lists and strings that contain all necessary information
Parameters
----------
atom_indices - list of system indices of the residue atoms
group_index - the index of the residue in the list of titratable residues
name - str, an identifier for this residue
residue_type - str, the 3 letter residue type in the forcefield specification (e.g. AS4).
exception_indices - list of NonbondedForce exceptions associated with this titration state
pka_data - dict, optional. Weights keyed by pH (float keys). Not compatible with the residue_pka option.
residue_pka - PopulationCalculator, optional. Can be used to provide target weights at a given pH. Not compatible with pka_data option.
"""
# The indices of the residue atoms in the system
obj = cls()
obj.atom_indices = list(atom_indices) # deep copy
# List to store titration states
obj.titration_states = list()
obj.index = group_index
obj.name = name
obj.residue_type = residue_type
# NonbondedForce exceptions associated with this titration state
obj.exception_indices = exception_indices
obj._state = None
obj._pka_data = None
obj._residue_pka = None
if pka_data is not None and residue_pka is not None:
raise ValueError("You can only provide pka_data, or residue_pka, not both.")
elif pka_data is not None:
obj._pka_data = pka_data
elif residue_pka is not None:
obj._residue_pka = residue_pka
return obj
@classmethod
def from_serialized_xml(cls, xmltree):
"""Create a titratable residue from a serialized titratable residue.
Parameters
----------
xmltree - etree.ElementTree or compatible lxml class, should only contain one residue.
Returns
-------
obj - a newly instantiated _TitratableResidue object.
"""
# prevent accidental modification of the user supplied file.
xmltree = copy.deepcopy(xmltree)
obj = cls()
# The indices of the residue atoms in the system
atom_indices = list()
res = xmltree.xpath("/TitratableResidue")[0]
for atom in xmltree.xpath("/TitratableResidue/atom"):
atom_indices.append(int(atom.get("index")))
obj.atom_indices = atom_indices
# List to store titration states
obj.titration_states = list()
obj.index = int(res.get("index"))
obj.name = str(res.get("name"))
obj.residue_type = str(res.get("type"))
# NonbondedForce exceptions associated with this titration state
exception_indices = list()
for exception in xmltree.xpath("/TitratableResidue/exception"):
exception_indices.append(int(exception.get("index")))
obj.exception_indices = exception_indices
obj._state = None
obj._pka_data = None
obj._residue_pka = None
# parse the pka data block as if an html table
pka_data = xmltree.xpath("/TitratableResidue/pka_data")
if len(pka_data):
pka_data = copy.deepcopy(pka_data[0])
pka_data.tag = "table"
obj._pka_data = pd.read_html(etree.tostring(pka_data))[0]
res_pka = res.get("residue_pka")
if res_pka is not None:
obj._residue_pka = available_pkas[res_pka]
if obj._pka_data is not None and obj._residue_pka is not None:
raise ValueError("You can only provide pka_data, or residue_pka, not both.")
states = xmltree.xpath("/TitratableResidue/TitrationState")
obj.titration_states = [None] * len(states)
for state in states:
state_index = int(state.get("index"))
obj.titration_states[state_index] = _TitrationState.from_serialized_xml(
state
)
# Set the titration state of this residue
obj.state = int(res.get("state"))
return obj
def add_state(self, state):
"""Adds a _TitrationState to the residue."""
self.titration_states.append(state)
def serialize(self):
"""
Create an xml representation of this residue.
Returns
-------
res - lxml tree containing residue information
"""
# xml factory
E = objectify.E
res = E.TitratableResidue(
name=self.name,
type=self.residue_type,
index=str(self.index),
state=str(self.state_index),
)
if self._residue_pka is not None:
# residue_pka holds a reference to the base class.
# Storing the name of the type, which can be looked to find it from the available_pkas dict
res.set("residue_pka", self.residue_type)
for atom_index in self.atom_indices:
objectify.SubElement(res, "atom", index=str(atom_index))
for exception_index in self.exception_indices:
objectify.SubElement(res, "exception", index=str(exception_index))
if self._pka_data is not None:
res.pka_data = objectify.fromstring(self._pka_data.to_html(index=False))
res.TitrationState = E.TitrationState()
res.TitrationState[:] = [
state.serialize(index) for index, state in enumerate(self.titration_states)
][:]
return res
def get_populations(self, pH, temperature=None, ionic_strength=None, strict=True):
"""Return the state populations for a given pH.
Parameters
----------
pH - float, the pH for which populations should be returned
temperature - float, the temperature in Kelvin that should be used to find the log populations if available.
Optional, soft requirement. Won't throw error if not matched.
ionic_strength - float, the ionic strength in millimolar that should be used to find the log populations if available.
Optional, soft requirement. Won't throw error if not matched.
strict - bool, default True. If there are no pH dependent weights, throw an error. Else, just return default weights.
Notes
-----
Temperature, and ionic strength are soft requirements.
"""
log_weights = np.empty(len(self))
# look up weights in the dictionary
if self._pka_data is not None:
# Search an appropriate log population value from the dataframe that was constructed.
# Temperature and Ionic strength aren't always provided.
for state, group in self._pka_data.groupby("State"):
# Get the first element where the pH matches, the temperature potentially matches, and the ionic strength potentially matches
state = int(state)
pH_match = group["pH"] == pH
temperature_match = True
ionic_strength_match = True
if temperature is not None:
temperature_match = group["Temperature (K)"].isin(
[temperature, None]
)
if ionic_strength is not None:
ionic_strength_match = group["Ionic strength (mM)"].isin(
ionic_strength, None
)
matches = group.loc[pH_match & temperature_match & ionic_strength_match]
# If there are no matches, throw an error
if len(matches) == 0:
raise ValueError(
"There is no matching pH/temperature condition available for residue {}.".format(
self.name
)
)
# get the first one
else:
first_row = next(matches.iterrows())
# index 1 is the row values, get the log population
log_population = first_row[1]["log population"]
log_weights[state] = log_population
# calculate residue weights from pka object
elif self._residue_pka is not None:
log_weights = self._residue_pka(pH).populations()
# If there is no pH dependent population specified, return the current target populations.
# This will be equal if this was never specified previously. See the target_weights property.
else:
if strict:
raise RuntimeError(
"Residue is not adjustable by pH. {}".format(self.name)
)
else:
log_weights = np.log(np.asarray(self.target_weights))
return np.asarray(log_weights)
def set_populations(self, pH):
"""
Set the target weights using the pH
Parameters
----------
pH - float, the pH of the simulation.
"""
# old_weights = np.asarray(self.target_weights)
self.target_weights = np.exp(self.get_populations(pH, strict=True))
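# Folding -log(target weight) into each g_k (next two lines) shifts the
# per-state reference free energies so that sampling is biased toward the
# pH-dependent target populations.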
ph_correction = -np.log(self.target_weights)
self.g_k_values = np.asarray(self.g_k_values) + ph_correction
@property
def state(self):
"""
Returns
-------
_TitrationState
"""
return self.titration_states[self._state]
@property
def state_index(self):
"""
The index of the current state of the residue.
"""
return self._state
@state.setter
def state(self, state: int):
"""
Set the titration state index. Warning: does not update the parameters.
This should only be modified by a proton drive.
"""
if state >= len(self):
raise IndexError(
"Titration state index out of bounds. (>= {})".format(len(self))
)
self._state = state
@property
def target_weights(self):
"""Target weight of each state. Default is equal weights."""
target_weights = [state.target_weight for state in self.titration_states]
if None in target_weights:
return [1.0 / len(self)] * len(self)
else:
return target_weights
@target_weights.setter
def target_weights(self, weights):
"""Set sampling target weights for all states."""
if not len(weights) == len(self):
raise ValueError(
"The number of weights needs to be equal to the number of states."
)
for id, state in enumerate(self):
state.target_weight = weights[id]
@property
def g_k_values(self) -> List[float]:
"""A list containing the g_k value for each state."""
return [state.g_k for state in self]
@g_k_values.setter
def g_k_values(self, g_klist: List[float]):
"""Set sampling target weights for all states."""
if not len(g_klist) == len(self):
raise ValueError(
"The number of g_k values needs to be equal to the number of states."
)
for id, state in enumerate(self):
state.g_k = g_klist[id]
@property
def proton_count(self):
"""Number of titratable protons in current state."""
return self.state.proton_count
@property
def proton_counts(self):
"""Number of titratable protons active in each state."""
return [state.proton_count for state in self]
def __len__(self):
"""Return length of group."""
return len(self.titration_states)
def __getitem__(self, item):
"""Retrieve state by index.
Parameters
----------
item - int
Titration state to be accessed.
"""
if item >= len(self.titration_states):
raise IndexError("Titration state outside of range.")
else:
return self.titration_states[item]
@property
def atom_status(self):
"""Returns boolean array of atoms, and if they're switched on.
Defined as charge equal to 0 (to precision of 1.e-9
"""
return [0 if abs(charge) < 1.0e-9 else 1 for charge in self.state.charges]
@property
def total_charge(self) -> int:
"""Total charge of the current titration state."""
return self.state.total_charge
@property
def total_charges(self):
"""Total charge of each state."""
return [state.total_charge for state in self]
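# Minimal usage sketch (editorial assumption, values illustrative): given a
# populated residue `res`, pH-dependent populations can be queried and applied:
#     log_pops = res.get_populations(7.4, strict=False)
#     res.set_populations(7.4)  # folds -log(target weights) into the g_k values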
class _TitrationState:
"""Representation of a titration state."""
def __init__(self):
"""Instantiate a _TitrationState, for internal use by ProtonDrive classes."""
self.g_k = None # dimensionless quantity
self.charges = list()
self.proton_count = (
None
) # Number of titratable protons compared to the most deprotonated state
self._cation_count = (
None
) # Number of cations to maintain charge compared to other states
self._anion_count = (
None
) # Number of anions to maintain charge compared to other states
self._forces = list()
self._target_weight = None
# MC moves should be functions that take the positions, and return updated positions,
# and a log (reverse/forward) proposal probability ratio
self._mc_moves = dict() # Dict[str, List[COOHDummyMover]]
@classmethod
def from_lists(
cls,
g_k: float,
charges: List[float],
proton_count: int,
cation_count: int,
anion_count: int,
cooh_movers: Optional[List[COOHDummyMover]] = None,
):
"""Instantiate a _TitrationState from g_k, proton count and a list of the charges
Returns
-------
obj - a new _TitrationState instance
"""
obj = cls()
obj.g_k = g_k # dimensionless quantity
obj.charges = copy.deepcopy(charges)
obj.proton_count = proton_count
obj._cation_count = cation_count
obj._anion_count = anion_count
# Note that forces are to be manually added by force caching functionality in ProtonDrives
obj._forces = list()
obj._target_weight = None
if cooh_movers is not None:
for mover in cooh_movers:
if "COOH" not in obj._mc_moves:
obj._mc_moves["COOH"] = list()
obj._mc_moves["COOH"].append(mover)
return obj
@classmethod
def from_serialized_xml(cls, state_element):
"""
Deserialize a _TitrationState from a previously serialized xml tree
Parameters
----------
xmltree - etree.Element or compatible lxml class containing one single titration state
Returns
-------
obj - a new _TitrationState instance
"""
obj = cls()
# prevent accidental modification
state = copy.deepcopy(state_element)
obj.proton_count = int(state.get("proton_count"))
obj._cation_count = int(state.get("cation_count"))
obj._anion_count = int(state.get("anion_count"))
target_weight = state.get("target_weight")
obj._target_weight = (
None if target_weight == "None" else np.float64(target_weight)
)
obj.g_k = np.float64(state.get("g_k"))
charges = state.xpath("charge")
obj.charges = [None] * len(charges)
for charge in charges:
# Get the array index
charge_index = int(charge.get("charge_index"))
charge_value = np.float64(charge.text)
obj.charges[charge_index] = charge_value
# forces is a list of forces, though currently in practice its of length one and contains only nonbonded force
# Inside each force is a dict containing 'atoms', and 'exceptions'
# 'atoms' and 'exceptions' are lists
# Inside of the list are dicts.
# Each dictionary contains the parameters for either an atom, or an exception.
# For atom it contains 'charge', 'sigma', 'epsilon', and 'atom_index'.
# For exception it contains 'exception_index' 'particle1' 'particle2' 'chargeProd' 'sigma', and 'epsilon'
forces = state.xpath("force")
obj._forces = [None] * len(forces)
for f_index, force in enumerate(forces):
force_dict = dict(atoms=list(), exceptions=list())
for atom in force.xpath("atom"):
atom_dict = dict()
for key in [
"atom_index",
"charge",
"epsilon",
"sigma",
"radius",
"scaleFactor",
]:
if key == "atom_index":
atom_dict[key] = int(atom.get(key))
else:
param_value = atom.get(key)
if param_value is not None:
atom_dict[key] = np.float64(param_value)
force_dict["atoms"].append(atom_dict)
for exception in force.xpath("exception"):
exc_dict = dict()
for key in [
"chargeProd",
"epsilon",
"exception_index",
"particle1",
"particle2",
"sigma",
]:
if key in ["particle1", "particle2", "exception_index"]:
exc_dict[key] = int(exception.get(key))
else:
exc_dict[key] = np.float64(exception.get(key))
force_dict["exceptions"].append(exc_dict)
obj._forces[f_index] = force_dict
# instantiate supported MCMoves from xml
# throws KeyError if there is an unimplemented move present
for child in state.xpath("MCMoves")[0].iterchildren():
if child.tag == "COOH":
for grandchild in child.iterchildren():
if grandchild.tag == "COOHDummyMover":
mover = COOHDummyMover.from_xml(grandchild)
try:
obj._mc_moves["COOH"].append(mover)
except KeyError:
obj._mc_moves["COOH"] = [mover]
else:
raise KeyError(
"Unknown COOH movetype found in XML: {}".format(
grandchild.tag
)
)
else:
raise KeyError(
"Unsupported MC movetype found in XML: {}".format(child.tag)
)
return obj
@property
def total_charge(self) -> int:
"""Return the total charge of the state."""
return int(round(sum(self.charges)))
@property
def anion_count(self) -> int:
return self._anion_count
@anion_count.setter
def anion_count(self, n_anions: int):
if type(n_anions) != int:
raise TypeError("The anion count should be integer.")
self._anion_count = n_anions
@property
def cation_count(self) -> int:
return self._cation_count
@cation_count.setter
def cation_count(self, n_cations: int):
if type(n_cations) != int:
raise TypeError("The cation count should be integer.")
self._cation_count = n_cations
@property
def forces(self):
return self._forces
@forces.setter
def forces(self, force_params):
self._forces = copy.deepcopy(force_params)
@property
def target_weight(self):
return self._target_weight
@target_weight.setter
def target_weight(self, weight):
self._target_weight = weight
def serialize(self, index=None):
"""Serialize a state into xml etree.
Returns
-------
state - objectify tree
"""
E = objectify.E
if index is not None:
index = str(index)
# Only serializing values that are not properties.
state = E.TitrationState(
proton_count=str(self.proton_count),
cation_count=str(self._cation_count),
anion_count=str(self._anion_count),
target_weight=str(self.target_weight),
index=index,
g_k=str(self.g_k),
)
q_tags = list()
for q_index, q in enumerate(self.charges):
# Ensure float is numpy type for print precision as specified in numpy print options
q = np.float64(q)
q_tags.append(E.charge("{:.15f}".format(q), charge_index=str(q_index)))
state.charge = E.charge
state.charge[:] = q_tags[:]
        # forces is a list of forces, though currently in practice it is of length one and contains only the nonbonded force
# Other forces will get serialized correctly, but deserialization may be an issue.
# Inside each force is a dict containing 'atoms', and 'exceptions'
# 'atoms' and 'exceptions' are lists
# Inside of the list are dicts.
# Each dictionary contains the parameters for either an atom, or an exception.
# For atom it contains 'charge', 'sigma', 'epsilon', and 'atom_index'.
        # For exception it contains 'exception_index', 'particle1', 'particle2', 'chargeProd', 'sigma', and 'epsilon'
for f_index, force in enumerate(self._forces):
force_xml = E.force(
index=str(
f_index
) # the force index in the internal state, not the force index in openmm
)
atoms = force["atoms"]
if "exceptions" in force:
exceptions = force["exceptions"]
else:
exceptions = []
for atom in atoms:
# Convert to string for xml storage
atom_strings = dict(atom)
for key in atom.keys():
if key == "atom_index":
atom_strings[key] = str(atom[key])
else:
# Ensure numpy type for print precision
atom_strings[key] = "{:.15f}".format(np.float64(atom[key]))
atom_tag = objectify.SubElement(force_xml, "atom", **atom_strings)
for exception in exceptions:
exception_strings = dict(exception)
for key in exception.keys():
if key in ["particle1", "particle2", "exception_index"]:
exception_strings[key] = str(exception[key])
else:
# Ensure numpy type for print precision
exception_strings[key] = "{:.15f}".format(
np.float64(exception[key])
)
exception_tag = objectify.SubElement(
force_xml, "exception", **exception_strings
)
state.append(force_xml)
# Titration state specific MCMoves are serialized using their to_xml method
mcmoves = objectify.SubElement(state, "MCMoves")
for mcmove, mcmovelist in self._mc_moves.items():
mcmovexml = objectify.fromstring("<{}/>".format(mcmove))
for submove in mcmovelist:
submovexml = objectify.fromstring(submove.to_xml())
mcmovexml.append(submovexml)
mcmoves.append(mcmovexml)
return state
def __eq__(self, other):
"""Compare the equality of two _TitrationState objects."""
if not isinstance(other, _TitrationState):
return False
float_atol = 1.0e-10
if not np.isclose(
self._target_weight, other._target_weight, rtol=0.0, atol=float_atol
):
return False
if not np.isclose(
self.proton_count, other.proton_count, rtol=0.0, atol=float_atol
):
return False
if not np.isclose(self.g_k, other.g_k, rtol=0.0, atol=float_atol):
return False
if len(self.charges) != len(other.charges):
return False
# Check if all stored charges are equal
if not np.all(
np.isclose(self.charges, other.charges, atol=float_atol, rtol=0.0)
):
return False
# check if all force parameters are equal
for own_force, other_force in zip(self._forces, other._forces):
own_atoms, other_atoms = own_force["atoms"], other_force["atoms"]
own_exceptions, other_exceptions = (
own_force["exceptions"],
other_force["exceptions"],
)
for own_atom, other_atom in zip(own_atoms, other_atoms):
for key in own_atom.keys():
if not np.isclose(
own_atom[key], other_atom[key], rtol=0.0, atol=float_atol
):
return False
for own_exception, other_exception in zip(own_exceptions, other_exceptions):
for key in own_exception.keys():
if not np.isclose(
own_exception[key],
other_exception[key],
rtol=0.0,
atol=float_atol,
):
return False
# Everything that was checked seems equal.
return True
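# Round-trip sketch (added illustration; `ts` stands for any populated
# _TitrationState instance, and etree/objectify come from lxml):
#     text = etree.tostring(ts.serialize(index=0))
#     restored = _TitrationState.from_serialized_xml(objectify.fromstring(text))
#     assert restored == ts
# The __eq__ defined above is what makes this round-trip easy to verify.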
class SAMSApproach(Enum):
"""Various ways of running SAMS for a titration drive.
Notes
-----
    This enum indicates which approach is used to run SAMS.
    SAMSApproach.ONESITE - A single residue is sampled using SAMS, while the rest is treated normally.
    SAMSApproach.MULTISITE - A combination of all residue states is treated as a single state.
    Example: 2 hydroxy residues have 4 joint states (OH1 OH2, O-1 OH2, OH1 O-2, O-1 O-2)
"""
ONESITE = 0
MULTISITE = 1
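    # Added illustration: for MULTISITE, the joint state space is the
    # Cartesian product of the per-residue states, e.g.
    #     state_counts = [2, 2]               # two 2-state residues
    #     int(np.prod(state_counts)) == 4     # joint titration states
    # matching the hydroxy example in the docstring above.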
class Stage(Enum):
"""Stages of a sams run."""
    NODECAY = -1  # Initial guess construction; do not adjust gain factor or SAMS iteration number.
SLOWDECAY = 0 # Fast gain but not optimal convergence
FASTDECAY = 1 # Slower gain but optimal asymptotic convergence
EQUILIBRIUM = 2 # No adaptation of g_k
class UpdateRule(Enum):
"""SAMS update rule."""
BINARY = 0
GLOBAL = 1
class _SAMSState:
"""A table to contain SAMS free energies (zeta or g_k) and targets (pi) for constant-pH residues."""
def __init__(
self,
state_counts: List[int],
approach: SAMSApproach,
group_index: Optional[int] = None,
update_rule: UpdateRule = UpdateRule.BINARY,
beta_sams: float = 0.5,
flatness_criterion: float = 0.05,
min_burn: int = 200,
min_slow: int = 200,
min_fast: int = 200,
):
"""Set up tracking for SAMS calibration weights.
Parameters
----------
state_counts - list of the number of states that each titratable residue has.
approach - one of the available ways of running SAMS (see ``SAMSApproach``)
group_index - integer, SAMSApproach.ONESITE only, specify the site.
update_rule - The update rule to use
beta_sams - SAMS two-stage coefficient to determine gain in first stage
flatness_criterion - how flat the absolute histogram needs to be to switch to slow gain
min_burn - minimum iterations before gain decay is initiated
min_slow - minimum number of SAMS iterations before fast decay is initiated.
min_fast - minimum number of SAMS iterations before equilibrium stage is initiated.
"""
# Contains SAMS free energy estimates
self._free_energy_table: np.ndarray = None
# Target weights
self._target_table: np.ndarray = None
# Observed histogram counts
self._observed_table: np.ndarray = None
# Indices in flattened array
self._index_table: np.ndarray = None
# state of the free energy calculation
self._update_rule: UpdateRule = update_rule
self._beta_sams: float = beta_sams
self._flatness_criterion = flatness_criterion
self._min_burn: int = min_burn
self._min_slow: int = min_slow
self._min_fast: int = min_fast
# Starting adaptation uses negative numbers to indicate burn-in
self._current_adaptation: int = -1 * min_burn
self._stage: Stage = Stage.NODECAY
self._end_of_slowdecay: int = 0
if not isinstance(approach, SAMSApproach):
raise TypeError("Please provide a SAMSApproach.")
# Group index is the last residue if not provided
if approach is SAMSApproach.ONESITE:
self.group_index = -1 if group_index is None else group_index
elif approach is SAMSApproach.MULTISITE:
if group_index is not None:
raise NotImplementedError(
"group_index should not be provided for multi site SAMS."
)
self.group_index = group_index
self.approach = approach
if approach is SAMSApproach.ONESITE:
# Every value in the table is the sams free energy/target weight of one independent titration state
# Note that the weights in one site should only change for one residue at a time.
# However, calibrated values may be stored, as they are internally used for calculation of relative probabilities.
self._free_energy_table = list()
self._target_table = list()
self._index_table = list()
self._observed_table = list()
for state in state_counts:
self._free_energy_table.append(np.zeros(state, dtype=np.float64))
targets = np.ones(state, dtype=np.float64) / state
self._target_table.append(targets)
self._observed_table.append(np.zeros(state, dtype=np.float64))
self._index_table.append(np.arange(state))
self._free_energy_table = np.asarray(self._free_energy_table)
self._target_table = np.asarray(self._target_table)
self._index_table = np.asarray(self._index_table)
self._observed_table = np.asarray(self._observed_table)
elif approach is SAMSApproach.MULTISITE:
# Every value in the table is one joint titration state
# Default value set to 0, but can be tweaked later with initial guesses.
self._free_energy_table = np.zeros(state_counts, dtype=np.float64)
            # Target weights are uniform over all joint states in multisite SAMS
total_count = int(np.prod(state_counts))
self._target_table = np.ones(state_counts, dtype=np.float64) / (total_count)
self._observed_table = np.zeros(state_counts, dtype=np.float64)
# For looking up index in the flattened array.
self._index_table = np.arange(total_count).reshape(state_counts)
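    # Shape sketch (added comment, hypothetical numbers): for
    # state_counts = [2, 3, 2], ONESITE keeps per-residue rows of lengths
    # 2, 3 and 2 with uniform targets per residue, while MULTISITE builds one
    # joint table of shape (2, 3, 2) with every target equal to 1/12;
    # _index_table then maps a joint state tuple to its flattened position:
    #     self._index_table[(1, 2, 0)] == np.ravel_multi_index((1, 2, 0), (2, 3, 2))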
def free_energy(self, titration_states: List[int]) -> float:
"""Return the sams free energy value for the provided titration state.
Parameters
----------
titration_states - list of the indices of the titration state of each individual residue
Notes
-----
For one site, only the free energy of the calibrated residue is added.
"""
        # In the one-site SAMS approach, only the current state of the residue being calibrated contributes.
if self.approach is SAMSApproach.ONESITE:
if len(titration_states) != len(self._free_energy_table):
raise ValueError(
"The number of titration states in the table does not match what was provided."
)
state = titration_states[self.group_index]
return self._free_energy_table[self.group_index][state]
# In case of the multisite sams approach, the sams weight is the one value in the table matching the joint state
elif self.approach is SAMSApproach.MULTISITE:
if len(titration_states) != len(self._free_energy_table.shape):
raise ValueError(
"The number of titration states provided does not match the dimensionality of the table."
)
return self._free_energy_table[tuple(titration_states)]
def target(self, titration_states: List[int]) -> np.float64:
"""Return the target weight for the supplied state."""
        # In the one-site SAMS approach, look up the target weight of the residue being calibrated
weight = None
if self.approach is SAMSApproach.ONESITE:
current_state = titration_states[self.group_index]
if len(titration_states) != len(self._free_energy_table):
raise ValueError(
"The number of titration states in the table does not match what was provided."
)
return self._target_table[self.group_index][current_state]
# In case of the multisite sams approach, the sams weight is the one value in the table matching the joint state
elif self.approach is SAMSApproach.MULTISITE:
if len(titration_states) != len(self._free_energy_table.shape):
raise ValueError(
"The number of titration states provided does not match the dimensionality of the table."
)
weight = self._target_table[tuple(titration_states)]
return weight
def observed(self, titration_states: List[int]) -> np.float64:
"""Return the histogram count for the supplied state."""
        # In the one-site SAMS approach, look up the histogram count of the residue being calibrated
counts = None
if self.approach is SAMSApproach.ONESITE:
current_state = titration_states[self.group_index]
if len(titration_states) != len(self._observed_table):
raise ValueError(
"The number of titration states in the table does not match what was provided."
)
return self._observed_table[self.group_index][current_state]
# In case of the multisite sams approach, the sams weight is the one value in the table matching the joint state
elif self.approach is SAMSApproach.MULTISITE:
if len(titration_states) != len(self._observed_table.shape):
raise ValueError(
"The number of titration states provided does not match the dimensionality of the table."
)
counts = self._observed_table[tuple(titration_states)]
return counts
@property
def targets(self) -> np.ndarray:
"""Return entire row of targets."""
if self.approach is SAMSApproach.ONESITE:
return self._target_table[self.group_index]
elif self.approach is SAMSApproach.MULTISITE:
return self._target_table.flatten()
@property
def observed_counts(self) -> np.ndarray:
"""Return entire row of histogram counts."""
if self.approach is SAMSApproach.ONESITE:
return self._observed_table[self.group_index]
elif self.approach is SAMSApproach.MULTISITE:
return self._observed_table.flatten()
@property
def deviation_from_target(self) -> np.ndarray:
"""Return the signed deviation from target for every state."""
# Ensure normalization works even if all observations are zero.
total = max(1.0, np.sum(self.observed_counts))
return (self.observed_counts / total) - self.targets
@property
def max_absolute_deviation(self) -> float:
"""Return the maximum absolute deviation between sampled and target histogram."""
return np.max(np.abs(self.deviation_from_target))
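    # Usage sketch (added; hypothetical driver logic): a SAMS driver can test
    # this value against the flatness criterion when deciding stage
    # transitions, e.g.
    #     if sams_state.max_absolute_deviation < sams_state._flatness_criterion:
    #         pass  # histogram flat enough; consider advancing the Stage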
@property
def free_energies(self) -> np.ndarray:
"""Return entire row of sams free energies."""
if self.approach is SAMSApproach.ONESITE:
return self._free_energy_table[self.group_index]
elif self.approach is SAMSApproach.MULTISITE:
return self._free_energy_table.flatten()
@free_energies.setter
def free_energies(self, free_energies: np.ndarray):
"""Update all free energy values from a 1D array."""
if not free_energies.ndim == 1:
raise ValueError("Free energy input needs to be one dimensional.")
if self.approach is SAMSApproach.ONESITE:
self._free_energy_table[self.group_index] = free_energies
elif self.approach is SAMSApproach.MULTISITE:
self._free_energy_table = free_energies.reshape(
self._free_energy_table.shape
)
@targets.setter
def targets(self, targets):
"""Update all targets from a 1D array."""
if not targets.ndim == 1:
raise ValueError("Target input needs to be one dimensional.")
if self.approach is SAMSApproach.ONESITE:
self._target_table[self.group_index] = targets
elif self.approach is SAMSApproach.MULTISITE:
self._target_table = targets.reshape(self._target_table.shape)
@observed_counts.setter
def observed_counts(self, counts):
"""Update all observed counts from a 1D array."""
if not counts.ndim == 1:
raise ValueError("Target input needs to be one dimensional.")
if self.approach is SAMSApproach.ONESITE:
self._observed_table[self.group_index] = counts
elif self.approach is SAMSApproach.MULTISITE:
self._observed_table = counts.reshape(self._observed_table.shape)
def reset_observed_counts(self):
"""Reset the observed counts to zero."""
        self.observed_counts = np.zeros_like(self.observed_counts)
# Copyright (c) 2020: <NAME> (<EMAIL>).
#
# This file is modified from <https://github.com/philip-huang/PIXOR>:
# Copyright (c) [2019] [<NAME>]
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Utils for PIXOR detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import pickle
import numpy as np
import tensorflow as tf
from shapely.geometry import Polygon
def get_eval_lists(images, latents, model_net, pixor_size=128):
gt_boxes_list = []
corners_list = []
scores_list = []
gt_pixor_state_list = []
recons_pixor_state_list = []
for i in range(len(latents)):
latent_eps = latents[i]
image_eps = images[i]
for j in range(len(latent_eps)):
latent = latent_eps[j]
dict_obs = image_eps[j]
dict_recons = model_net.reconstruct_pixor(latent)
            vh_clas_recons = tf.squeeze(dict_recons['vh_clas'], axis=-1)  # (B,H,W,1) -> (B,H,W)
vh_regr_recons = dict_recons['vh_regr'] # (B,H,W,6)
decoded_reg_recons = decode_reg(vh_regr_recons, pixor_size) # (B,H,W,8)
pixor_state_recons = dict_recons['pixor_state']
            vh_clas_obs = tf.squeeze(dict_obs['vh_clas'], axis=-1)  # (B,H,W,1) -> (B,H,W)
vh_regr_obs = dict_obs['vh_regr'] # (B,H,W,6)
decoded_reg_obs = decode_reg(vh_regr_obs, pixor_size) # (B,H,W,8)
pixor_state_obs = dict_obs['pixor_state']
B = vh_regr_obs.shape[0]
for k in range(B):
gt_boxes, _ = pixor_postprocess(vh_clas_obs[k], decoded_reg_obs[k])
corners, scores = pixor_postprocess(vh_clas_recons[k], decoded_reg_recons[k]) # (N,4,2)
gt_boxes_list.append(gt_boxes)
corners_list.append(corners)
scores_list.append(scores)
gt_pixor_state_list.append(pixor_state_obs[k])
recons_pixor_state_list.append(pixor_state_recons[k])
return gt_boxes_list, corners_list, scores_list, gt_pixor_state_list, recons_pixor_state_list
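# Added sketch (not in the original module): pairwise IoU between two boxes
# given as (4, 2) corner arrays, using the shapely import above. This is the
# quantity that compute_matches (defined elsewhere in the project) compares
# against iou_threshold.
def box_iou_sketch(corners_a, corners_b):
    """Return intersection-over-union of two quadrilateral boxes."""
    p1, p2 = Polygon(corners_a), Polygon(corners_b)
    inter = p1.intersection(p2).area
    return inter / (p1.area + p2.area - inter)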
def get_eval_metrics(images, latents, model_net, pixor_size=128, ap_range=[0.3,0.5,0.7], filename = 'metrics'):
gt_boxes_list, corners_list, scores_list, gt_pixor_state_list, recons_pixor_state_list \
= get_eval_lists(images, latents, model_net, pixor_size=pixor_size)
N = len(gt_boxes_list)
APs = {}
precisions = {}
recalls = {}
for ap in ap_range:
gts = 0
preds = 0
all_scores = []
all_matches = []
for i in range(N):
gt_boxes = gt_boxes_list[i]
corners = corners_list[i]
scores = scores_list[i]
gt_match, pred_match, overlaps = compute_matches(gt_boxes,
corners, scores, iou_threshold=ap)
num_gt = gt_boxes.shape[0]
num_pred = len(scores)
gts += num_gt
preds += num_pred
all_scores.extend(list(scores))
all_matches.extend(list(pred_match))
all_scores = np.array(all_scores)
all_matches = np.array(all_matches)
sort_ids = np.argsort(all_scores)
all_matches = all_matches[sort_ids[::-1]]
if gts == 0 or preds == 0:
return
AP, precision, recall, p, r = compute_ap(all_matches, gts, preds)
print('ap', ap)
print('AP', AP)
print('precision', p)
print('recall', r)
APs[ap] = AP
precisions[ap] = precision
recalls[ap] = recall
results = {}
results['APs'] = APs
results['precisions'] = precisions
results['recalls'] = recalls
error_position = []
error_heading = []
error_velocity = []
for i in range(N):
gt_pixor_state = gt_pixor_state_list[i]
recons_pixor_state = recons_pixor_state_list[i]
x0, y0, cos0, sin0, v0 = gt_pixor_state
x, y, cos, sin, v = recons_pixor_state
error_position.append(np.sqrt((x-x0)**2+(y-y0)**2))
yaw0 = np.arctan2(sin0, cos0)
cos0 = np.cos(yaw0)
sin0 = np.sin(yaw0)
yaw = np.arctan2(sin, cos)
        cos = np.cos(yaw)
#!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by <NAME> <<EMAIL>>
# unit tests for util routines
# Disable cache
import os
try:
os.environ.pop("LIBROSA_CACHE_DIR")
except:
pass
import platform
import numpy as np
import scipy.sparse
import pytest
import warnings
import librosa
from test_core import srand
np.set_printoptions(precision=3)
def test_example_audio_file():
assert os.path.exists(librosa.util.example_audio_file())
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize("y", [np.random.randn(32)])
@pytest.mark.parametrize("axis", [0, -1])
def test_frame1d(frame_length, hop_length, axis, y):
y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
if axis == -1:
y_frame = y_frame.T
for i in range(y_frame.shape[0]):
assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize(
"y, axis", [(np.asfortranarray(np.random.randn(16, 32)), -1), (np.ascontiguousarray(np.random.randn(16, 32)), 0)]
)
def test_frame2d(frame_length, hop_length, axis, y):
y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
if axis == -1:
y_frame = y_frame.T
y = y.T
for i in range(y_frame.shape[0]):
assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
def test_frame_0stride():
x = np.arange(10)
xpad = x[np.newaxis]
xpad2 = np.atleast_2d(x)
xf = librosa.util.frame(x, 3, 1)
xfpad = librosa.util.frame(xpad, 3, 1)
xfpad2 = librosa.util.frame(xpad2, 3, 1)
assert np.allclose(xf, xfpad)
assert np.allclose(xf, xfpad2)
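# Added illustration (not an original librosa test), assuming the
# frames-as-columns convention for axis=-1 that the tests above rely on.
def test_frame_example_sketch():
    x = np.arange(5)
    xf = librosa.util.frame(x, frame_length=3, hop_length=1)
    # Column i is the sliding window x[i : i + 3]
    assert xf.shape == (3, 3)
    assert np.allclose(xf[:, 0], [0, 1, 2])
    assert np.allclose(xf[:, 2], [2, 3, 4])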
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_badtype():
librosa.util.frame([1, 2, 3, 4], frame_length=2, hop_length=1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("x", [np.arange(16)])
def test_frame_too_short(x, axis):
librosa.util.frame(x, frame_length=17, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_bad_hop():
librosa.util.frame(np.arange(16), frame_length=4, hop_length=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [1, 2])
def test_frame_bad_axis(axis):
librosa.util.frame(np.zeros((3, 3, 3)), frame_length=2, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x, axis", [(np.zeros((4, 4), order="C"), -1), (np.zeros((4, 4), order="F"), 0)])
def test_frame_bad_contiguity(x, axis):
librosa.util.frame(x, frame_length=2, hop_length=1, axis=axis)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
def test_pad_center(y, m, axis, mode):
n = m + y.shape[axis]
y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
n_len = y.shape[axis]
n_pad = int((n - n_len) / 2)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(n_pad, n_pad + n_len)
assert np.allclose(y, y_out[tuple(eq_slice)])
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("n", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_pad_center_fail(y, n, axis, mode):
librosa.util.pad_center(y, n, axis=axis, mode=mode)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [-5, 0, 5])
@pytest.mark.parametrize("axis", [0, -1])
def test_fix_length(y, m, axis):
n = m + y.shape[axis]
y_out = librosa.util.fix_length(y, n, axis=axis)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(y.shape[axis])
if n > y.shape[axis]:
assert np.allclose(y, y_out[tuple(eq_slice)])
else:
assert np.allclose(y[tuple(eq_slice)], y)
@pytest.mark.parametrize("frames", [np.arange(20, 100, step=15)])
@pytest.mark.parametrize("x_min", [0, 20])
@pytest.mark.parametrize("x_max", [20, 70, 120])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames(frames, x_min, x_max, pad):
f_fix = librosa.util.fix_frames(frames, x_min=x_min, x_max=x_max, pad=pad)
if x_min is not None:
if pad:
assert f_fix[0] == x_min
assert np.all(f_fix >= x_min)
if x_max is not None:
if pad:
assert f_fix[-1] == x_max
assert np.all(f_fix <= x_max)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("frames", [np.arange(-20, 100)])
@pytest.mark.parametrize("x_min", [None, 0, 20])
@pytest.mark.parametrize("x_max", [None, 0, 20])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames_fail_negative(frames, x_min, x_max, pad):
librosa.util.fix_frames(frames, x_min, x_max, pad)
@pytest.mark.parametrize("norm", [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None])
@pytest.mark.parametrize("ndims,axis", [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 1), (3, 2), (3, -1)])
def test_normalize(ndims, norm, axis):
srand()
X = np.random.randn(*([4] * ndims))
X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
# Shape and dtype checks
assert X_norm.dtype == X.dtype
assert X_norm.shape == X.shape
if norm is None:
assert np.allclose(X, X_norm)
return
X_norm = np.abs(X_norm)
if norm == np.inf:
values = np.max(X_norm, axis=axis)
elif norm == -np.inf:
values = np.min(X_norm, axis=axis)
elif norm == 0:
# XXX: normalization here isn't quite right
values = np.ones(1)
else:
values = np.sum(X_norm ** norm, axis=axis) ** (1.0 / norm)
assert np.allclose(values, np.ones_like(values))
@pytest.mark.parametrize("norm", ["inf", -0.5, -2])
@pytest.mark.parametrize("X", [np.ones((3, 3))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badnorm(X, norm):
librosa.util.normalize(X, norm=norm)
@pytest.mark.parametrize("badval", [np.nan, np.inf, -np.inf])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_bad_input(badval):
X = np.ones((3, 3))
X[0] = badval
librosa.util.normalize(X, norm=np.inf, axis=0)
@pytest.mark.parametrize("fill", [7, "foo"])
@pytest.mark.parametrize("X", [np.ones((2, 2))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badfill(X, fill):
librosa.util.normalize(X, fill=fill)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize(
"threshold, result",
[(None, [[0, 1, 1, 1]]), (1, [[0, 1, 1, 1]]), (2, [[0, 1, 1, 1]]), (3, [[0, 1, 2, 1]]), (4, [[0, 1, 2, 3]])],
)
def test_normalize_threshold(x, threshold, result):
assert np.allclose(librosa.util.normalize(x, threshold=threshold), result)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize("threshold", [0, -1])
def test_normalize_threshold_fail(x, threshold):
librosa.util.normalize(x, threshold=threshold)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_fill_l0():
X = np.ones((2, 2))
librosa.util.normalize(X, fill=True, norm=0)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_fill_allaxes(X, norm):
Xn = librosa.util.normalize(X, fill=True, axis=None, norm=norm)
if norm is np.inf:
assert np.allclose(Xn, 1)
else:
assert np.allclose(np.sum(Xn ** norm) ** (1.0 / norm), 1)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_nofill(X, norm):
Xn = librosa.util.normalize(X, fill=False, norm=norm)
assert np.allclose(Xn, 0)
@pytest.mark.parametrize("X", [np.asarray([[0.0, 1], [0, 1]])])
@pytest.mark.parametrize("norm,value", [(1, 0.5), (2, np.sqrt(2) / 2), (np.inf, 1)])
@pytest.mark.parametrize("threshold", [0.5, 2])
def test_normalize_fill(X, threshold, norm, value):
Xn = librosa.util.normalize(X, fill=True, norm=norm, threshold=threshold)
assert np.allclose(Xn, value)
@pytest.mark.parametrize("ndim", [1, 3])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_axis_sort_badndim(ndim, axis, index, value):
data = np.zeros([2] * ndim)
librosa.util.axis_sort(data, axis=axis, index=index, value=value)
@pytest.mark.parametrize("ndim", [2])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
def test_axis_sort(ndim, axis, index, value):
srand()
data = np.random.randn(*([10] * ndim))
if index:
Xsorted, idx = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
cmp_slice = [slice(None)] * ndim
cmp_slice[axis] = idx
assert np.allclose(data[tuple(cmp_slice)], Xsorted)
else:
Xsorted = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
compare_axis = np.mod(1 - axis, 2)
if value is None:
value = np.argmax
sort_values = value(Xsorted, axis=compare_axis)
assert np.allclose(sort_values, np.sort(sort_values))
@pytest.mark.parametrize(
"int_from, int_to",
[
(np.asarray([[0, 2], [0, 4], [3, 6]]), np.empty((0, 2), dtype=int)),
(np.empty((0, 2), dtype=int), np.asarray([[0, 2], [0, 4], [3, 6]])),
],
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_empty(int_from, int_to):
librosa.util.match_intervals(int_from, int_to)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_strict_fail():
int_from = np.asarray([[0, 3], [2, 4], [5, 7]])
int_to = np.asarray([[0, 2], [0, 4]])
librosa.util.match_intervals(int_from, int_to, strict=True)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize("int_to", [np.asarray([[0, 2], [0, 4], [3, 6]])])
@pytest.mark.parametrize("matches", [np.asarray([1, 1, 2])])
def test_match_intervals_strict(int_from, int_to, matches):
test_matches = librosa.util.match_intervals(int_from, int_to, strict=True)
assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize(
"int_to,matches",
[
(np.asarray([[0, 2], [0, 4], [3, 6]]), np.asarray([1, 1, 2])),
(np.asarray([[0, 2], [0, 4]]), np.asarray([1, 1, 1])),
],
)
def test_match_intervals_nonstrict(int_from, int_to, matches):
test_matches = librosa.util.match_intervals(int_from, int_to, strict=False)
assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("n", [1, 5, 20, 100])
@pytest.mark.parametrize("m", [1, 5, 20, 100])
def test_match_events(n, m):
srand()
ev1 = np.abs(np.random.randn(n))
ev2 = np.abs(np.random.randn(m))
match = librosa.util.match_events(ev1, ev2)
for i in range(len(match)):
values = np.asarray([np.abs(ev1[i] - e2) for e2 in ev2])
assert not np.any(values < values[match[i]])
@pytest.mark.parametrize("ev1,ev2", [(np.array([]), np.arange(5)), (np.arange(5), np.array([]))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_events_failempty(ev1, ev2):
librosa.util.match_events(ev1, ev2)
@pytest.mark.parametrize("events_from", [np.asarray([5, 15, 25])])
@pytest.mark.parametrize("events_to", [np.asarray([0, 10, 20, 30])])
@pytest.mark.parametrize("left,right,target", [(False, True, [10, 20, 30]), (True, False, [0, 10, 20])])
def test_match_events_onesided(events_from, events_to, left, right, target):
events_from = np.asarray(events_from)
events_to = np.asarray(events_to)
match = librosa.util.match_events(events_from, events_to, left=left, right=right)
assert np.allclose(target, events_to[match])
def test_match_events_twosided():
events_from = np.asarray([5, 15, 25])
events_to = np.asarray([5, 15, 25, 30])
match = librosa.util.match_events(events_from, events_to, left=False, right=False)
assert np.allclose(match, [0, 1, 2])
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
"events_from,events_to,left,right",
[
([40, 15, 25], [0, 10, 20, 30], False, True), # right-sided fail
([-1, 15, 25], [0, 10, 20, 30], True, False), # left-sided fail
([-1, 15, 25], [0, 10, 20, 30], False, False), # two-sided fail
],
)
def test_match_events_onesided_fail(events_from, events_to, left, right):
events_from = np.asarray(events_from)
events_to = np.asarray(events_to)
librosa.util.match_events(events_from, events_to, left=left, right=right)
@pytest.mark.parametrize("ndim, axis", [(n, m) for n in range(1, 5) for m in range(n)])
def test_localmax(ndim, axis):
srand()
data = np.random.randn(*([7] * ndim))
lm = librosa.util.localmax(data, axis=axis)
for hits in np.argwhere(lm):
for offset in [-1, 1]:
compare_idx = hits.copy()
compare_idx[axis] += offset
if compare_idx[axis] < 0:
continue
if compare_idx[axis] >= data.shape[axis]:
continue
if offset < 0:
assert data[tuple(hits)] > data[tuple(compare_idx)]
else:
assert data[tuple(hits)] >= data[tuple(compare_idx)]
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize("pre_max", [0, 1, 10])
@pytest.mark.parametrize("post_max", [1, 10])
@pytest.mark.parametrize("pre_avg", [0, 1, 10])
@pytest.mark.parametrize("post_avg", [1, 10])
@pytest.mark.parametrize("wait", [0, 1, 10])
@pytest.mark.parametrize("delta", [0.05, 100.0])
def test_peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
peaks = librosa.util.peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait)
for i in peaks:
# Test 1: is it a peak in this window?
s = i - pre_max
if s < 0:
s = 0
t = i + post_max
diff = x[i] - np.max(x[s:t])
assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
# Test 2: is it a big enough peak to count?
s = i - pre_avg
if s < 0:
s = 0
t = i + post_avg
diff = x[i] - (delta + np.mean(x[s:t]))
assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
# Test 3: peak separation
assert not np.any(np.diff(peaks) <= wait)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize(
"pre_max,post_max,pre_avg,post_avg,delta,wait",
[
(-1, 1, 1, 1, 0.05, 1), # negative pre-max
(1, -1, 1, 1, 0.05, 1), # negative post-max
(1, 0, 1, 1, 0.05, 1), # 0 post-max
(1, 1, -1, 1, 0.05, 1), # negative pre-avg
(1, 1, 1, -1, 0.05, 1), # negative post-avg
(1, 1, 1, 0, 0.05, 1), # zero post-avg
(1, 1, 1, 1, -0.05, 1), # negative delta
(1, 1, 1, 1, 0.05, -1), # negative wait
],
)
def test_peak_pick_fail(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
librosa.util.peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_peak_pick_shape_fail():
# Can't pick peaks on 2d inputs
librosa.util.peak_pick(np.eye(2), 1, 1, 1, 1, 0.5, 1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("ndim", [3, 4])
def test_sparsify_rows_ndimfail(ndim):
X = np.zeros([2] * ndim)
librosa.util.sparsify_rows(X)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("quantile", [1.0, -1, 2.0])
@pytest.mark.parametrize("X", [np.ones((3, 3))])
def test_sparsify_rows_badquantile(X, quantile):
librosa.util.sparsify_rows(X, quantile=quantile)
@pytest.mark.parametrize("ndim", [1, 2])
@pytest.mark.parametrize("d", [1, 5, 10, 100])
@pytest.mark.parametrize("q", [0.0, 0.01, 0.25, 0.5, 0.99])
def test_sparsify_rows(ndim, d, q):
srand()
X = np.random.randn(*([d] * ndim)) ** 4
X = np.asarray(X)
xs = librosa.util.sparsify_rows(X, quantile=q)
if ndim == 1:
X = X.reshape((1, -1))
assert np.allclose(xs.shape, X.shape)
# And make sure that xs matches X on nonzeros
xsd = np.asarray(xs.todense())
for i in range(xs.shape[0]):
assert np.allclose(xsd[i, xs[i].indices], X[i, xs[i].indices])
# Compute row-wise magnitude marginals
v_in = np.sum(np.abs(X), axis=-1)
v_out = np.sum(np.abs(xsd), axis=-1)
# Ensure that v_out retains 1-q fraction of v_in
assert np.all(v_out >= (1.0 - q) * v_in)
@pytest.mark.parametrize(
"searchdir", [os.path.join(os.path.curdir, "tests"), os.path.join(os.path.curdir, "tests", "data")]
)
@pytest.mark.parametrize("ext", [None, "wav", "WAV", ["wav"], ["WAV"]])
@pytest.mark.parametrize("recurse", [True])
@pytest.mark.parametrize("case_sensitive", list({False} | {platform.system() != "Windows"}))
@pytest.mark.parametrize("limit", [None, 1, 2])
@pytest.mark.parametrize("offset", [0, 1, -1])
@pytest.mark.parametrize(
"output",
[
[
os.path.join(os.path.abspath(os.path.curdir), "tests", "data", s)
for s in ["test1_22050.mp3", "test1_22050.wav", "test1_44100.wav", "test2_8000.wav"]
]
],
)
def test_find_files(searchdir, ext, recurse, case_sensitive, limit, offset, output):
files = librosa.util.find_files(
searchdir, ext=ext, recurse=recurse, case_sensitive=case_sensitive, limit=limit, offset=offset
)
targets = output
if ext is not None:
# If we're only seeking wavs, bump off the mp3 file
targets = targets[1:]
s1 = slice(offset, None)
s2 = slice(limit)
if case_sensitive and ext not in (None, "wav", ["wav"]):
assert len(files) == 0
else:
assert set(files) == set(targets[s1][s2])
def test_find_files_nonrecurse():
files = librosa.util.find_files(os.path.join(os.path.curdir, "tests"), recurse=False)
assert len(files) == 0
# fail if ext is not none, we're case-sensitive, and looking for WAV
@pytest.mark.parametrize("ext", ["WAV", ["WAV"]])
def test_find_files_case_sensitive(ext):
files = librosa.util.find_files(os.path.join(os.path.curdir, "tests"), ext=ext, case_sensitive=True)
# On windows, this test won't work
if platform.system() != "Windows":
assert len(files) == 0
@pytest.mark.parametrize("x_in", np.linspace(-2, 2, num=6))
@pytest.mark.parametrize("cast", [None, np.floor, np.ceil])
def test_valid_int(x_in, cast):
z = librosa.util.valid_int(x_in, cast)
assert isinstance(z, int)
if cast is None:
assert z == int(np.floor(x_in))
else:
assert z == int(cast(x_in))
@pytest.mark.parametrize("x", np.linspace(-2, 2, num=3))
@pytest.mark.parametrize("cast", [7])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_valid_int_fail(x, cast):
# Test with a non-callable cast operator
librosa.util.valid_int(x, cast)
@pytest.mark.parametrize(
"ivals", [np.asarray([[0, 1], [1, 2]]), np.asarray([[0, 0], [1, 1]]), np.asarray([[0, 2], [1, 2]])]
)
def test_valid_intervals(ivals):
librosa.util.valid_intervals(ivals)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
"ivals", [np.asarray([]), np.arange(2), np.ones((2, 2, 2)), np.ones((2, 3))] # ndim=0 # ndim=1 # ndim=3
) # ndim=2, shape[1] != 2
def test_valid_intervals_badshape(ivals):
# fail if ndim != 2 or shape[1] != 2
librosa.util.valid_intervals(ivals)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("intval", [np.asarray([[0, 1], [2, 1]])])
def test_valid_intervals_fail(intval):
# Test for issue #712: intervals must have non-negative duration
librosa.util.valid_intervals(intval)
def test_warning_deprecated():
@librosa.util.decorators.deprecated("old_version", "new_version")
def __dummy():
return True
with warnings.catch_warnings(record=True) as out:
x = __dummy()
# Make sure we still get the right value
assert x is True
# And that the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert "deprecated" in str(out[0].message).lower()
def test_warning_moved():
@librosa.util.decorators.moved("from", "old_version", "new_version")
def __dummy():
return True
with warnings.catch_warnings(record=True) as out:
x = __dummy()
# Make sure we still get the right value
assert x is True
# And that the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert "moved" in str(out[0].message).lower()
def test_warning_rename_kw_pass():
warnings.resetwarnings()
warnings.simplefilter("always")
ov = librosa.util.Deprecated()
nv = 23
with warnings.catch_warnings(record=True) as out:
v = librosa.util.rename_kw("old", ov, "new", nv, "0", "1")
assert v == nv
# Make sure no warning triggered
assert len(out) == 0
def test_warning_rename_kw_fail():
warnings.resetwarnings()
warnings.simplefilter("always")
ov = 27
nv = 23
with warnings.catch_warnings(record=True) as out:
v = librosa.util.rename_kw("old", ov, "new", nv, "0", "1")
assert v == ov
# Make sure the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert "renamed" in str(out[0].message).lower()
@pytest.mark.parametrize("idx", [np.arange(10, 90, 10), np.arange(10, 90, 15)])
@pytest.mark.parametrize("idx_min", [None, 5, 15])
@pytest.mark.parametrize("idx_max", [None, 85, 100])
@pytest.mark.parametrize("step", [None, 2])
@pytest.mark.parametrize("pad", [False, True])
def test_index_to_slice(idx, idx_min, idx_max, step, pad):
slices = librosa.util.index_to_slice(idx, idx_min=idx_min, idx_max=idx_max, step=step, pad=pad)
if pad:
if idx_min is not None:
assert slices[0].start == idx_min
if idx.min() != idx_min:
slices = slices[1:]
if idx_max is not None:
assert slices[-1].stop == idx_max
if idx.max() != idx_max:
slices = slices[:-1]
if idx_min is not None:
idx = idx[idx >= idx_min]
if idx_max is not None:
idx = idx[idx <= idx_max]
idx = np.unique(idx)
assert len(slices) == len(idx) - 1
for sl, start, stop in zip(slices, idx, idx[1:]):
assert sl.start == start
assert sl.stop == stop
assert sl.step == step
@pytest.mark.parametrize("aggregate", [None, np.mean, np.sum])
@pytest.mark.parametrize("ndim,axis", [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 2), (3, -1)])
def test_sync(aggregate, ndim, axis):
    data = np.ones([6] * ndim, dtype=float)
# Make some slices that don't fill the entire dimension
slices = [slice(1, 3), slice(3, 4)]
dsync = librosa.util.sync(data, slices, aggregate=aggregate, axis=axis)
# Check the axis shapes
assert dsync.shape[axis] == len(slices)
s_test = list(dsync.shape)
del s_test[axis]
s_orig = list(data.shape)
del s_orig[axis]
assert s_test == s_orig
# The first slice will sum to 2 and have mean 1
idx = [slice(None)] * ndim
idx[axis] = 0
    if aggregate is np.sum:
        assert np.allclose(dsync[tuple(idx)], 2)
    else:
        assert np.allclose(dsync[tuple(idx)], 1)
    # The second slice will sum to 1 and have mean 1
    idx[axis] = 1
    assert np.allclose(dsync[tuple(idx)], 1)
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
def test_sync_slices(aggregate):
x = np.arange(8, dtype=float)
slices = [slice(0, 2), slice(2, 4), slice(4, 6), slice(6, 8)]
xsync = librosa.util.sync(x, slices, aggregate=aggregate)
if aggregate is np.mean:
assert np.allclose(xsync, [0.5, 2.5, 4.5, 6.5])
elif aggregate is np.max:
assert np.allclose(xsync, [1, 3, 5, 7])
else:
assert False
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
@pytest.mark.parametrize("atype", [list, np.asarray])
def test_sync_frames(aggregate, atype):
x = np.arange(8, dtype=float)
frames = atype([0, 2, 4, 6, 8])
xsync = librosa.util.sync(x, frames, aggregate=aggregate)
if aggregate is np.mean:
assert np.allclose(xsync, [0.5, 2.5, 4.5, 6.5])
elif aggregate is np.max:
assert np.allclose(xsync, [1, 3, 5, 7])
else:
assert False
@pytest.mark.parametrize("atype", [list, np.asarray])
@pytest.mark.parametrize("pad", [False, True])
def test_sync_frames_pad(atype, pad):
x = np.arange(8, dtype=float)
frames = atype([2, 4, 6])
xsync = librosa.util.sync(x, frames, pad=pad)
if pad:
assert np.allclose(xsync, [0.5, 2.5, 4.5, 6.5])
else:
assert np.allclose(xsync, [2.5, 4.5])
@pytest.mark.parametrize("data", [np.mod(np.arange(135), 5)])
@pytest.mark.parametrize("idx", [["foo", "bar"], [None], [slice(None), None]])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_sync_fail(data, idx):
librosa.util.sync(data, idx)
@pytest.mark.parametrize("power", [1, 2, 50, 100, np.inf])
@pytest.mark.parametrize("split_zeros", [False, True])
def test_softmask(power, split_zeros):
srand()
X = np.abs(np.random.randn(10, 10))
X_ref = np.abs(np.random.randn(10, 10))
# Zero out some rows
X[3, :] = 0
X_ref[3, :] = 0
M = librosa.util.softmask(X, X_ref, power=power, split_zeros=split_zeros)
assert np.all(0 <= M) and np.all(M <= 1)
if split_zeros and np.isfinite(power):
assert np.allclose(M[3, :], 0.5)
else:
assert not np.any(M[3, :]), M[3]
def test_softmask_int():
X = 2 * np.ones((3, 3), dtype=np.int32)
X_ref = np.vander(np.arange(3))
M1 = librosa.util.softmask(X, X_ref, power=1)
M2 = librosa.util.softmask(X_ref, X, power=1)
assert np.allclose(M1 + M2, 1)
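# Added reference sketch (not an original test), assuming librosa's
# documented soft-mask definition M = X**power / (X**power + X_ref**power).
def test_softmask_reference_sketch():
    X = np.array([1.0, 3.0])
    X_ref = np.array([3.0, 1.0])
    M = librosa.util.softmask(X, X_ref, power=2)
    assert np.allclose(M, X ** 2 / (X ** 2 + X_ref ** 2))  # -> [0.1, 0.9]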
@pytest.mark.parametrize(
"x,x_ref,power,split_zeros",
[
(-np.ones(3), np.ones(3), 1, False),
(np.ones(3), -np.ones(3), 1, False),
(np.ones(3), np.ones(4), 1, False),
(np.ones(3), np.ones(3), 0, False),
(np.ones(3), np.ones(3), -1, False),
],
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_softmask_fail(x, x_ref, power, split_zeros):
librosa.util.softmask(x, x_ref, power=power, split_zeros=split_zeros)
@pytest.mark.parametrize(
"x,value",
[
(1, np.finfo(np.float32).tiny),
(np.ones(3, dtype=int), np.finfo(np.float32).tiny),
(np.ones(3, dtype=np.float32), np.finfo(np.float32).tiny),
(1.0, np.finfo(np.float64).tiny),
(np.ones(3, dtype=np.float64), np.finfo(np.float64).tiny),
(1j, np.finfo(np.complex128).tiny),
(np.ones(3, dtype=np.complex64), np.finfo(np.complex64).tiny),
(np.ones(3, dtype=np.complex128), np.finfo(np.complex128).tiny),
],
)
def test_tiny(x, value):
assert value == librosa.util.tiny(x)
def test_util_fill_off_diagonal_8_8():
# Case 1: Square matrix (N=M)
mut_x = np.ones((8, 8))
librosa.util.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array(
[
[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1],
]
)
assert np.array_equal(mut_x, gt_x)
assert np.array_equal(mut_x, gt_x.T)
def test_util_fill_off_diagonal_8_12():
# Case 2a: N!=M
mut_x = np.ones((8, 12))
librosa.util.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array(
[
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
]
)
assert np.array_equal(mut_x, gt_x)
# Case 2b: (N!=M).T
mut_x = np.ones((8, 12)).T
librosa.util.fill_off_diagonal(mut_x, 0.25)
assert np.array_equal(mut_x, gt_x.T)
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
def test_nnls_vector(dtype_A, dtype_B):
srand()
# Make a random basis
A = np.random.randn(5, 7).astype(dtype_A)
# Make a random latent vector
x = np.random.randn(A.shape[1]) ** 2
B = A.dot(x).astype(dtype_B)
x_rec = librosa.util.nnls(A, B)
assert np.all(x_rec >= 0)
assert np.sqrt(np.mean((B - A.dot(x_rec)) ** 2)) <= 1e-6
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
@pytest.mark.parametrize("x_size", [3, 30])
def test_nnls_matrix(dtype_A, dtype_B, x_size):
srand()
# Make a random basis
A = np.random.randn(5, 7).astype(dtype_A)
# Make a random latent matrix
# when x_size is 3, B is 7x3 (smaller than A)
x = np.random.randn(A.shape[1], x_size) ** 2
B = A.dot(x).astype(dtype_B)
x_rec = librosa.util.nnls(A, B)
assert np.all(x_rec >= 0)
assert np.sqrt(np.mean((B - A.dot(x_rec)) ** 2)) <= 1e-5
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
@pytest.mark.parametrize("x_size", [16, 64, 256])
def test_nnls_multiblock(dtype_A, dtype_B, x_size):
srand()
# Make a random basis
A = np.random.randn(7, 1025).astype(dtype_A)
# Make a random latent matrix
# when x_size is 3, B is 7x3 (smaller than A)
x = np.random.randn(A.shape[1], x_size) ** 2
B = A.dot(x).astype(dtype_B)
x_rec = librosa.util.nnls(A, B)
assert np.all(x_rec >= 0)
assert np.sqrt(np.mean((B - A.dot(x_rec)) ** 2)) <= 1e-4
@pytest.fixture
def psig():
# [[0, 1, 2, 3, 4]]
# axis=1 or -1 ==> [-1.5, 1, 1, 1, -1.5]
# axis=0 ==> [0, 0, 0, 0, 0]
return np.arange(0, 5, dtype=float)[np.newaxis]
@pytest.mark.parametrize("edge_order", [1, 2])
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_cyclic_gradient(psig, edge_order, axis):
grad = librosa.util.cyclic_gradient(psig, edge_order=edge_order, axis=axis)
assert grad.shape == psig.shape
assert grad.dtype == psig.dtype
# Check the values
if axis == 0:
        assert np.allclose(grad, 0)
import numpy as np
from numba import njit
import scipy as sp
import scipy.optimize as spo
from netket.stats import (
statistics as _statistics,
mean as _mean,
sum_inplace as _sum_inplace,
)
from netket.utils import (
MPI_comm as _MPI_comm,
n_nodes as _n_nodes,
node_number as _rank
)
from mpi4py import MPI
from netket.machine import QGPSLinExp
class SupervisedLearning():
def __init__(self, machine):
self.machine = machine
def mean_squared_error(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
local_error = np.sum(weightings * abs(np.exp(self.machine.log_val(basis)) - target_amplitudes)**2)
else:
local_error = 0.0
return _MPI_comm.allreduce(local_error)
def mean_squared_error_der(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = np.exp(self.machine.log_val(basis))
der_log = self.machine.der_log(basis)
residuals = (estimates - target_amplitudes).conj()*weightings
der = 2 * np.einsum("ij,i,i->j", der_log, estimates, residuals)
if self.machine.has_complex_parameters:
der = np.concatenate((der.real, (1.j*der).real))
else:
if self.machine.has_complex_parameters:
der = np.zeros(2*self.machine._npar)
else:
der = np.zeros(self.machine._npar)
return _sum_inplace(der)
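    # Finite-difference check sketch (added comment; `learner`, `basis`,
    # `targets`, `w` are hypothetical):
    #     e0 = learner.mean_squared_error(basis, targets, w)
    #     g = learner.mean_squared_error_der(basis, targets, w)
    #     p = learner.machine.parameters.copy(); p[0] += 1e-6
    #     learner.machine.parameters = p
    #     e1 = learner.mean_squared_error(basis, targets, w)
    #     # (e1 - e0) / 1e-6 should approximate the real part of g[0]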
def mean_squared_error_hess(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = np.exp(self.machine.log_val(basis))
der = self.machine.der_log(basis)
proper_der = (der.T * estimates)
hess_el = self.machine.hess(basis)
wfn_hess_first_term = hess_el.T * estimates
wfn_hess_sec_term = np.einsum("ij,jk->ikj", proper_der, der)
wfn_hess = wfn_hess_first_term + wfn_hess_sec_term
residuals = (estimates-target_amplitudes).conj()*weightings
hess_first_term = (np.dot(wfn_hess, residuals))
hess_sec_term = np.matmul(proper_der*weightings, proper_der.T.conj())
if self.machine.has_complex_parameters:
hess_first_term = np.block([[hess_first_term.real, (1.j*hess_first_term).real],[(1.j*hess_first_term).real,-hess_first_term.real]])
hess_sec_term = np.block([[hess_sec_term, -1.j*hess_sec_term],[1.j*hess_sec_term, hess_sec_term]])
            # Assemble the Hessian inside this branch so the empty-target case
            # below simply returns zeros.
            hess = 2 * (hess_first_term + hess_sec_term)
        else:
            if self.machine.has_complex_parameters:
                hess = np.zeros(2 * self.machine._npar)
            else:
                hess = np.zeros(self.machine._npar)
        return _sum_inplace(hess)
def overlap(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
predictions = np.exp(self.machine.log_val(basis))
overlap = abs(_MPI_comm.allreduce(np.sum(weightings * predictions * target_amplitudes.conj())))**2
norm = _MPI_comm.allreduce(np.sum(weightings * abs(predictions)**2)) * _MPI_comm.allreduce(np.sum(weightings * abs(target_amplitudes)**2))
return overlap/norm
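    # Single-process sketch of the same quantity (added comment, MPI
    # reductions dropped): for plain vectors psi (predictions), phi (targets)
    # and weights w,
    #     overlap = abs(np.sum(w * psi * phi.conj()))**2 / (
    #         np.sum(w * abs(psi)**2) * np.sum(w * abs(phi)**2))
    # i.e. the weighted fidelity |<psi|phi>|^2 / (<psi|psi> <phi|phi>).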
def overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = np.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _sum_inplace(np.einsum("i,ij->j", (weightings * estimates * target_amplitudes), der))
norm1 = _MPI_comm.allreduce(np.sum(weightings * estimates * target_amplitudes))
overlap2 = _sum_inplace(np.einsum("i,ij->j", (weightings * abs(estimates)**2), der))
norm2 = _MPI_comm.allreduce(np.sum(weightings * abs(estimates)**2))
derivative = overlap1/norm1 - overlap2/norm2
overlap = self.overlap(basis, target_amplitudes, weightings)
if self.machine.has_complex_parameters:
derivative = np.concatenate((derivative.real, derivative.imag))
return overlap * derivative.real
def neg_log_overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = np.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _sum_inplace(np.einsum("i,ij->j", (weightings * estimates * target_amplitudes), der))
norm1 = _MPI_comm.allreduce(np.sum(weightings * estimates * target_amplitudes))
overlap2 = _sum_inplace(np.einsum("i,ij->j", (weightings * abs(estimates)**2), der))
norm2 = _MPI_comm.allreduce(np.sum(weightings * abs(estimates)**2))
derivative = -overlap1/norm1 + overlap2/norm2
if self.machine.has_complex_parameters:
derivative = np.concatenate((derivative.real, derivative.imag))
return derivative.real
def bayes_loss(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
return beta/2 * self.mean_squared_error(basis, target_amplitudes, weightings) + 0.5 * np.sum((parameters**2) * alpha)
def grad_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
der = beta/2 * self.mean_squared_error_der(basis, target_amplitudes, weightings)
der += parameters * alpha
return der
def hess_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
hess = beta/2 * self.mean_squared_error_hess(basis, target_amplitudes, weightings)
hess += np.diag(alpha)
return hess
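    # Added note: bayes_loss is the negative log posterior of a Gaussian
    # likelihood (precision beta) with a zero-mean Gaussian prior of
    # per-parameter precision alpha, up to constants; grad_bayes and
    # hess_bayes are its gradient and Hessian, so a schematic Newton update
    # would read
    #     theta -= np.linalg.solve(hess_bayes(...), grad_bayes(...))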
def get_bias(self, target_amplitudes, weightings=None, dtype=complex):
if len(target_amplitudes) > 0:
if weightings is None:
local_sum = np.sum(np.log(target_amplitudes))
n_terms = len(target_amplitudes)
else:
local_sum = np.sum(np.log(target_amplitudes)*weightings)
n_terms = np.sum(weightings)
else:
local_sum = 0.
n_terms = 1
return _MPI_comm.allreduce(local_sum)/_MPI_comm.allreduce(n_terms)
class QGPSLearning(SupervisedLearning):
def __init__(self, machine, init_alpha=1.0, bond_min_id=0, bond_max_id=None, complex_expand=True):
super().__init__(machine)
self.K = None
self.weights = None
self.site_prod = None
self.confs = None
self.ref_site = None
self.bond_min_id = bond_min_id
if bond_max_id is None:
self.bond_max_id = self.machine._epsilon.shape[1]
else:
self.bond_max_id = bond_max_id
self.n_bond = self.bond_max_id - self.bond_min_id
self.complex_expand = complex_expand
self.local_dim = self.machine.hilbert._local_size
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat = np.ones((self.machine._epsilon.shape[0], self.local_dim*2*self.n_bond))*init_alpha
else:
self.alpha_mat = np.ones((self.machine._epsilon.shape[0], self.local_dim*self.n_bond))*init_alpha
self.alpha_cutoff = 1.e10
self.kern_cutoff = 1.e-15
self.sinv_fallback = True
self.alpha_convergence_tol = 1.e-15
@staticmethod
@njit()
def kernel_mat_inner(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
if sym_spin_flip_sign[t] * confs[i, Smap[t, ref_site]] < 0.0:
K[i, 2*x] += site_prod[i, x, t]
else:
K[i, 2*x+1] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast(epsilon, bond_min, bond_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_min, bond_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def update_site_prod_fast(epsilon, bond_min, bond_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = np.finfo(np.double).eps
for (x, w) in enumerate(range(bond_min, bond_max)):
if abs(epsilon[ref_site, w, 0]) > 1.e4 * eps and abs(epsilon[ref_site, w, 1]) > 1.e4 * eps:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site]] < 0:
site_product[i, x, t] /= epsilon[ref_site, w, 0]
else:
site_product[i, x, t] /= epsilon[ref_site, w, 1]
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site_old]] < 0:
site_product[i, x, t] *= epsilon[ref_site_old, w, 0]
else:
site_product[i, x, t] *= epsilon[ref_site_old, w, 1]
else:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
site_product[i, x, t] = 1.0
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def kernel_mat_inner_fermion(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
index = round(confs[i, Smap[t, ref_site]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
K[i, 4*x + index] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast_fermion(epsilon, bond_min, bond_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_min, bond_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
index = round(confs[i, Smap[t, j]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[j, w, index]
return site_product
@staticmethod
@njit()
def update_site_prod_fast_fermion(epsilon, bond_min, bond_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = np.finfo(np.double).eps
for (x, w) in enumerate(range(bond_min, bond_max)):
if np.min(np.abs(epsilon[ref_site, w, :])) > 1.e4 * eps:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
index = round(confs[i, Smap[t, ref_site]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] /= epsilon[ref_site, w, index]
index = round(confs[i, Smap[t, ref_site_old]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[ref_site_old, w, index]
else:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
site_product[i, x, t] = 1.0
for j in range(confs.shape[1]):
if j != ref_site:
index = round(confs[i, Smap[t, j]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[j, w, index]
return site_product
def compute_site_prod(self):
if self.site_prod is None:
self.site_prod = np.zeros((self.confs.shape[0], self.n_bond, self.machine._Smap.shape[0]), dtype=self.machine._epsilon.dtype)
if self.local_dim == 2:
self.site_prod = self.compute_site_prod_fast(self.machine._epsilon, self.bond_min_id, self.bond_max_id, self.ref_site,
self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
else:
self.site_prod = self.compute_site_prod_fast_fermion(self.machine._epsilon, self.bond_min_id, self.bond_max_id, self.ref_site,
self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
self.site_prod_ref_site = self.ref_site
def update_site_prod(self):
if self.site_prod_ref_site != self.ref_site:
if self.local_dim == 2:
self.site_prod = self.update_site_prod_fast(self.machine._epsilon, self.bond_min_id, self.bond_max_id, self.ref_site,
self.site_prod_ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
else:
self.site_prod = self.update_site_prod_fast_fermion(self.machine._epsilon, self.bond_min_id, self.bond_max_id, self.ref_site,
self.site_prod_ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.site_prod)
self.site_prod_ref_site = self.ref_site
def set_kernel_mat(self, confs, multiplication=None):
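# (Re)build the kernel matrix only when needed: recompute the cached site products
# if the configurations changed, update them incrementally if only the reference
# site moved, and reassemble K in either case.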
update_K = False
assert(self.ref_site is not None)
if self.site_prod is None or self.confs is None or np.sum(self.confs != confs) != 0:
self.confs = confs
self.compute_site_prod()
update_K = True
elif self.ref_site != self.site_prod_ref_site:
self.update_site_prod()
update_K = True
if self.K is None:
self.K = np.zeros((confs.shape[0], self.n_bond * self.local_dim), dtype=self.machine._epsilon.dtype)
update_K = True
if update_K:
if self.local_dim == 2:
self.K = self.kernel_mat_inner(self.site_prod, self.ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.K)
else:
self.K = self.kernel_mat_inner_fermion(self.site_prod, self.ref_site, self.confs, self.machine._Smap,
self.machine._sym_spin_flip_sign, self.K)
if multiplication is not None:
# TODO: maybe this should be done better
self.K = (self.K.T * multiplication).T
return self.K
def reset(self):
self.site_prod = None
self.K = None
def setup_fit_alpha_dep(self):
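# Regularized normal equations: KtK + diag(alpha) with the prior precisions of the
# current reference site; alpha is halved when complex parameters are expanded
# into separate real and imaginary parts.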
self.active_elements = self.alpha_mat[self.ref_site,:] < self.alpha_cutoff
if self.complex_expand and self.machine.dtype==complex:
self.KtK_alpha = self.KtK + np.diag(self.alpha_mat[self.ref_site,:]/2)
else:
self.KtK_alpha = self.KtK + np.diag(self.alpha_mat[self.ref_site,:])
self.valid_kern = abs(np.diag(self.KtK)) > self.kern_cutoff
if np.sum(self.active_elements) > 0:
# try:
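# NOTE: the Cholesky branch below is deliberately disabled (if False); the code
# currently always takes the explicit-inverse / least-squares path.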
if False:
L = sp.linalg.cholesky(self.KtK_alpha[np.ix_(self.active_elements, self.active_elements)], lower=True)
self.Sinv = sp.linalg.solve_triangular(L, np.eye(self.KtK_alpha.shape[0]), check_finite=False, lower=True)
weights = sp.linalg.cho_solve((L, True), self.y[self.active_elements])
self.cholesky = True
else:
# except:
self.Sinv = np.linalg.inv(self.KtK_alpha[np.ix_(self.active_elements, self.active_elements)])
if self.sinv_fallback:
weights = self.Sinv.dot(self.y[self.active_elements])
else:
weights = sp.linalg.lstsq(self.KtK_alpha[np.ix_(self.active_elements, self.active_elements)], self.y[self.active_elements])[0]
self.cholesky = False
# if _rank == 0:
# print("Warning! Cholesky failed.")
if self.weights is None:
if not self.complex_expand and self.machine.dtype==complex:
self.weights = np.zeros(self.alpha_mat.shape[1], dtype=complex)
else:
self.weights = np.zeros(self.alpha_mat.shape[1], dtype=float)
else:
self.weights.fill(0.0)
if np.sum(self.active_elements) > 0:
# potentially distribute weights across processes
self.weights[self.active_elements] = weights
def log_marg_lik_alpha_der(self):
derivative_alpha = 1/(self.alpha_mat[self.ref_site, :])
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
derivative_alpha[self.active_elements] -= 0.5 * np.sum(abs(self.Sinv) ** 2, 0)
else:
derivative_alpha[self.active_elements] -= np.sum(abs(self.Sinv) ** 2, 0)
else:
if self.complex_expand and self.machine.dtype==complex:
derivative_alpha[self.active_elements] -= np.diag(0.5 * self.Sinv).real
else:
derivative_alpha[self.active_elements] -= np.diag(self.Sinv).real
derivative_alpha -= (self.weights.conj() * self.weights).real
if self.complex_expand or self.machine.dtype==float:
derivative_alpha *= 0.5
return derivative_alpha.real
def set_up_prediction(self, confset):
if self.ref_site is None:
self.ref_site = 0
self.set_kernel_mat(confset)
def squared_error(self, confset, target_amplitudes, weightings = None):
errors = abs(self.predict(confset) - target_amplitudes)**2
if weightings is not None:
errors *= weightings
return _MPI_comm.allreduce(np.sum(errors))
def squared_error_log_space(self, confset, target_amplitudes, weightings = None):
errors = abs(np.log(self.predict(confset)) - np.log(target_amplitudes))**2
if weightings is not None:
errors *= weightings
return _MPI_comm.allreduce(np.sum(errors))
class QGPSLearningExp(QGPSLearning):
def __init__(self, machine, init_alpha = 1.0, init_noise_tilde = 1.e-1, complex_expand=True):
if isinstance(machine, QGPSLinExp):
min_bond_id = machine._n_bond_lin
else:
min_bond_id = 0
super().__init__(machine, init_alpha=init_alpha, bond_min_id=min_bond_id, complex_expand=complex_expand)
self.noise_tilde = init_noise_tilde
def get_bias(self, target_amplitudes, weightings=None, dtype=complex):
if weightings is None:
return _mean(np.log(target_amplitudes))
else:
return _MPI_comm.allreduce(np.sum(np.log(target_amplitudes)*weightings))/_MPI_comm.allreduce(np.sum(weightings))
def predict(self, confset):
assert(confset.size > 0)
self.set_up_prediction(confset)
return np.exp(self.K.dot((self.machine._epsilon[self.ref_site, self.bond_min_id:self.bond_max_id, :]).flatten()))
def setup_fit_noise_dep(self, weightings=None):
if self.noise_tilde == 0.:
self.S_diag = np.ones(len(self.exp_amps))
else:
self.S_diag = 1/(np.log1p(self.noise_tilde/(abs(self.exp_amps)**2)))
if weightings is not None:
self.S_diag *= weightings
self.weightings = weightings
self.KtK = _sum_inplace(np.dot(self.K.conj().T, np.einsum("i,ij->ij", self.S_diag, self.K)))
self.y = _sum_inplace(self.K.conj().T.dot(self.S_diag * self.fit_data))
if self.complex_expand and self.machine.dtype==complex:
self.KtK = np.block([[self.KtK.real, -self.KtK.imag],[self.KtK.imag, self.KtK.real]])
self.y = np.concatenate((self.y.real, self.y.imag))
self.setup_fit_alpha_dep()
def setup_fit(self, confset, target_amplitudes, ref_site, multiplication=None, weightings=None):
self.ref_site = ref_site
self.exp_amps = target_amplitudes.astype(self.machine._epsilon.dtype)
if self.machine._epsilon.dtype == float:
if multiplication is not None:
self.fit_data = np.log(abs(self.exp_amps/multiplication))
else:
self.fit_data = np.log(abs(self.exp_amps))
else:
if multiplication is not None:
self.fit_data = np.log(self.exp_amps/multiplication)
else:
self.fit_data = np.log(self.exp_amps)
self.set_kernel_mat(confset)
self.setup_fit_noise_dep(weightings=weightings)
def log_marg_lik(self):
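# Log marginal likelihood (evidence) of the Bayesian linear model in
# log-amplitude space: noise term, log-determinants of posterior and prior,
# and the weighted data fit, MPI-reduced over the distributed samples.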
if self.weightings is not None:
if self.machine.dtype==complex:
log_lik = -(np.sum(self.weightings * np.log(np.pi/(self.S_diag/self.weightings))))
else:
log_lik = -(np.sum(self.weightings * np.log(2*np.pi/(self.S_diag/self.weightings))))
else:
if self.machine.dtype==complex:
log_lik = -(np.sum(np.log(np.pi/self.S_diag)))
else:
log_lik = -(np.sum(np.log(2*np.pi/self.S_diag)))
log_lik -= np.dot(self.fit_data.conj(), self.S_diag * self.fit_data)
log_lik = _MPI_comm.allreduce(log_lik)
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * np.sum(np.log(0.5 * abs(np.diag(self.Sinv))**2))
else:
log_lik += 2 * np.sum(np.log(abs(np.diag(self.Sinv))))
else:
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * np.linalg.slogdet(0.5 * self.Sinv)[1]
else:
log_lik += np.linalg.slogdet(self.Sinv)[1]
if self.complex_expand and self.machine.dtype==complex:
log_lik += 0.5 * np.sum(np.log(self.alpha_mat[self.ref_site, self.active_elements]))
else:
log_lik += np.sum(np.log(self.alpha_mat[self.ref_site, self.active_elements]))
weights = self.weights[self.active_elements]
log_lik += np.dot(weights.conj(), np.dot(self.KtK_alpha[np.ix_(self.active_elements, self.active_elements)], weights))
if self.machine.dtype==float:
log_lik *= 0.5
return log_lik.real
def log_marg_lik_noise_der(self):
del_S = 1/((abs(self.exp_amps)**2) * (1 + self.noise_tilde/(abs(self.exp_amps)**2)))
Delta_S = - (self.S_diag**2 * del_S)
if self.weightings is not None:
Delta_S /= self.weightings
derivative_noise = -np.sum(self.S_diag * del_S)
K = self.K
KtK_der = self.K.conj().T.dot(np.diag(Delta_S).dot(self.K))
if self.complex_expand and self.machine.dtype==complex:
KtK_der = np.block([[KtK_der.real, -KtK_der.imag],[KtK_der.imag, KtK_der.real]])
K = np.hstack((K, 1.j * K))
K = K[:,self.active_elements]
KtK_der = KtK_der[np.ix_(self.active_elements, self.active_elements)]
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
derivative_noise -= 0.5 * np.trace(KtK_der.dot(self.Sinv.conj().T.dot(self.Sinv)))
else:
derivative_noise -= np.trace(KtK_der.dot(self.Sinv.conj().T.dot(self.Sinv)))
else:
if self.complex_expand and self.machine.dtype==complex:
derivative_noise -= 0.5 * np.trace(KtK_der.dot(self.Sinv))
else:
derivative_noise -= np.trace(KtK_der.dot(self.Sinv))
weights = self.weights[self.active_elements]
derivative_noise -= self.fit_data.conj().dot(Delta_S*self.fit_data)
derivative_noise -= weights.conj().dot(KtK_der.dot(weights))
derivative_noise += 2*self.fit_data.conj().dot(Delta_S*K.dot(weights))
derivative_noise = _MPI_comm.allreduce(derivative_noise)
if self.machine.dtype==float:
derivative_noise *= 0.5
return derivative_noise.real
def opt_alpha(self, max_iterations=None, rvm=False):
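# Fixed-point (evidence maximization) update of the prior precisions: gamma
# measures how well each weight is determined by the data. With rvm=True every
# alpha is updated individually (sparsity-inducing); otherwise one shared alpha is used.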
alpha_old = self.alpha_mat[self.ref_site, :].copy()
converged = False
j = 0
if max_iterations is not None:
if j >= max_iterations:
converged = True
while not converged:
if self.cholesky:
if self.complex_expand and self.machine.dtype==complex:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*0.5*np.sum(abs(self.Sinv) ** 2, 0))
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*np.sum(abs(self.Sinv) ** 2, 0))
else:
if self.complex_expand and self.machine.dtype==complex:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*0.5*np.diag(self.Sinv).real)
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*np.diag(self.Sinv).real)
if rvm:
self.alpha_mat[self.ref_site, self.active_elements] = (gamma/((self.weights.conj()*self.weights)[self.active_elements])).real
else:
self.alpha_mat[self.ref_site, self.active_elements] = ((np.sum(gamma)/(self.weights.conj().dot(self.weights))).real)
self.setup_fit_alpha_dep()
j += 1
if np.sum(abs(self.alpha_mat[self.ref_site, :] - alpha_old)**2) < self.alpha_convergence_tol:
converged = True
np.copyto(alpha_old, self.alpha_mat[self.ref_site, :])
if max_iterations is not None:
if j >= max_iterations:
converged = True
def fit_step(self, confset, target_amplitudes, ref_site, noise_bounds=[(None, None)],
opt_alpha=True, opt_noise=True, max_alpha_iterations=None, max_noise_iterations=None, rvm=False,
multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if opt_noise:
alpha_init = self.alpha_mat.copy()
def ML(x):
self.noise_tilde = np.exp(x[0])
if opt_alpha:
np.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(max_iterations=max_alpha_iterations, rvm=rvm)
return -self.log_marg_lik()
def derivative(x):
self.noise_tilde = np.exp(x[0])
if opt_alpha:
np.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(max_iterations=max_alpha_iterations, rvm=rvm)
der_noise = self.log_marg_lik_noise_der()
return - der_noise * np.exp(x)
def update_alpha(x):
self.noise_tilde = np.exp(x[0])
if opt_alpha:
self.opt_alpha(max_iterations=max_alpha_iterations, rvm=rvm)
np.copyto(alpha_init, self.alpha_mat)
if max_noise_iterations is not None:
opt = sp.optimize.minimize(ML, np.log(self.noise_tilde), options={"maxiter" : max_noise_iterations}, jac=derivative, bounds=noise_bounds, callback=update_alpha)
else:
opt = sp.optimize.minimize(ML, np.log(self.noise_tilde), jac=derivative, bounds=noise_bounds, callback=update_alpha)
self.noise_tilde = np.exp(opt.x)[0]
if opt_alpha:
np.copyto(self.alpha_mat, alpha_init)
self.setup_fit_noise_dep(weightings=weightings)
if opt_alpha:
self.opt_alpha(max_iterations=max_alpha_iterations, rvm=rvm)
self.machine._epsilon[ref_site, self.bond_min_id:self.bond_max_id, :] = self.weights[:self.local_dim*self.n_bond].reshape(self.n_bond, self.local_dim)
if self.complex_expand and self.machine.dtype==complex:
self.machine._epsilon[ref_site, self.bond_min_id:self.bond_max_id, :] += 1.j * self.weights[self.local_dim*self.n_bond:].reshape(self.n_bond, self.local_dim)
return
'''
parts of the following code are based on the code from https://github.com/AmazaspShumik/sklearn-bayes
which is published under the following MIT license:
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def compute_sparsity_quantities(self):
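# Sparsity (s) and quality (q) factors of the fast marginal-likelihood scheme
# for the relevance vector machine (Tipping & Faul 2003), evaluated for all
# candidate features at once.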
bxy = self.y
bxx = np.diag(self.KtK)
if self.cholesky:
xxr = np.dot(self.KtK[:, self.active_elements], self.Sinv.conj().T)
rxy = np.dot(self.Sinv, self.y[self.active_elements])
S = bxx - np.sum(abs(xxr) ** 2, axis=1)
Q = bxy - np.dot(xxr, rxy)
else:
XXa = self.KtK[:, self.active_elements]
XS = np.dot(XXa, self.Sinv)
S = bxx - np.sum(XS * XXa, 1)
Q = bxy - np.dot(XS, self.y[self.active_elements])
S = S.real
Q = Q.real
# Use following:
# (EQ 1) q = A*Q/(A - S) ; s = A*S/(A-S), so if A = np.PINF q = Q, s = S
qi = np.copy(Q)
si = np.copy(S)
# If A is not np.PINF, then it should be 'active' feature => use (EQ 1)
Qa, Sa = Q[self.active_elements], S[self.active_elements]
if self.complex_expand and self.machine.dtype==complex:
qi[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Qa / (self.alpha_mat[self.ref_site, self.active_elements] - 2*Sa)
si[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Sa / (self.alpha_mat[self.ref_site, self.active_elements] - 2*Sa)
else:
qi[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Qa / (self.alpha_mat[self.ref_site, self.active_elements] - Sa)
si[self.active_elements] = self.alpha_mat[self.ref_site, self.active_elements] * Sa / (self.alpha_mat[self.ref_site, self.active_elements] - Sa)
return [si, qi, S, Q]
def update_precisions(self, s, q, S, Q):
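# Decide which single feature to add, re-estimate or delete by the largest
# change deltaL in the log marginal likelihood, then update that feature's precision.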
deltaL = np.zeros(Q.shape[0])
theta = abs(q) ** 2 - s
add = (theta > 0) & (~self.active_elements)
recompute = (theta > 0) & self.active_elements
delete = (theta <= 0) & self.active_elements
# compute sparsity & quality parameters corresponding to features in
# three groups identified above
Qadd, Sadd = Q[add], S[add]
if self.complex_expand and self.machine.dtype==complex:
Qrec, Srec, Arec = Q[recompute], S[recompute], self.alpha_mat[self.ref_site, recompute]/2
Qdel, Sdel, Adel = Q[delete], S[delete], self.alpha_mat[self.ref_site, delete]/2
else:
Qrec, Srec, Arec = Q[recompute], S[recompute], self.alpha_mat[self.ref_site, recompute]
Qdel, Sdel, Adel = Q[delete], S[delete], self.alpha_mat[self.ref_site, delete]
# compute new alpha's (precision parameters) for features that are
# currently in model and will be recomputed
Anew = s[recompute] ** 2 / (theta[recompute])
delta_alpha = (1. / Anew) - (1. / Arec)
# compute change in log marginal likelihood
deltaL[add] = (abs(Qadd) ** 2 - Sadd) / Sadd + np.log(Sadd / abs(Qadd) ** 2)
deltaL[recompute] = abs(Qrec) ** 2 / (Srec + 1. / delta_alpha) - np.log(1 + Srec * delta_alpha)
deltaL[delete] = abs(Qdel) ** 2 / (Sdel - Adel) - np.log(1 - Sdel / Adel)
deltaL = np.nan_to_num(deltaL, nan=np.NINF, posinf=np.NINF, neginf=np.NINF)
# find feature which caused largest change in likelihood
feature_index = np.argmax(deltaL)
if theta[feature_index] > 0:
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, feature_index] = 2 * (s[feature_index] ** 2 / theta[feature_index])
else:
self.alpha_mat[self.ref_site, feature_index] = s[feature_index] ** 2 / theta[feature_index]
else:
# keep at least one active feature
if self.active_elements[feature_index] and np.sum(self.active_elements) >= 2:
self.alpha_mat[self.ref_site, feature_index] = np.PINF
return
def fit_step_growing_RVM(self, confset, target_amplitudes, ref_site, alpha_iterations=None, multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if np.max(self.active_elements) == 0:
if np.min(abs(np.diag(self.KtK))) < np.finfo(np.float32).eps:
self.alpha_mat[self.ref_site, 0] = np.finfo(np.float32).eps
else:
projections = (abs(self.y) **2 / np.diag(self.KtK))
ind = np.argmax(projections)
alpha_est = (((np.diag(self.KtK))**2 / (abs(self.y)**2 - np.diag(self.KtK))).real)[ind]
if alpha_est > 0.:
self.alpha_mat[self.ref_site, ind] = alpha_est
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, ind] *= 2
else:
self.alpha_mat[self.ref_site, ind] = 1.
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat[self.ref_site, ind] *= 2
print(alpha_est)
self.setup_fit_alpha_dep()
if alpha_iterations is None:
alpha_iterations = 1  # guard: range(None) would raise a TypeError
for i in range(alpha_iterations):
s, q, S, Q = self.compute_sparsity_quantities()
self.update_precisions(s, q, S, Q)
self.setup_fit_alpha_dep()
self.machine._epsilon[ref_site, self.bond_min_id:self.bond_max_id, :] = self.weights[:self.local_dim*self.n_bond].reshape(self.n_bond, self.local_dim)
if self.complex_expand and self.machine.dtype==complex:
self.machine._epsilon[ref_site, self.bond_min_id:self.bond_max_id, :] += 1.j * self.weights[self.local_dim*self.n_bond:].reshape(self.n_bond, self.local_dim)
return
class QGPSLearningLin(QGPSLearning):
def __init__(self, machine, init_alpha = 1.0, init_noise = 1.e-1, complex_expand=True):
if isinstance(machine, QGPSLinExp):
super().__init__(machine, init_alpha=init_alpha, bond_max_id=machine._n_bond_lin, complex_expand=complex_expand)
else:
super().__init__(machine, init_alpha=init_alpha, complex_expand=complex_expand)
self.noise = init_noise
self.noise_convergence_tol = self.alpha_convergence_tol
def get_bias(self, target_amplitudes, dtype=complex):
return _MPI_comm.allreduce(np.max(np.abs(target_amplitudes)), op=MPI.MAX)
def predict(self, confset):
assert(confset.size > 0)
self.set_up_prediction(confset)
return self.K.dot((self.machine._epsilon[self.ref_site, self.bond_min_id:self.bond_max_id, :]).flatten())
def setup_KtK(self, weightings=None):
if weightings is not None:
self.KtK_no_noise = _sum_inplace(np.dot(self.K.conj().T, np.einsum("i,ij->ij", weightings, self.K)))
self.y_no_noise = _sum_inplace(self.K.conj().T.dot(weightings * self.fit_data))
else:
self.KtK_no_noise = _sum_inplace(np.dot(self.K.conj().T, self.K))
self.y_no_noise = _sum_inplace(self.K.conj().T.dot(self.fit_data))
self.weightings = weightings
self.setup_fit_noise_dep()
def setup_fit_noise_dep(self):
self.KtK = self.KtK_no_noise/self.noise
self.y = self.y_no_noise/self.noise
if self.complex_expand and self.machine.dtype==complex:
self.KtK = np.block([[self.KtK.real, -self.KtK.imag],[self.KtK.imag, self.KtK.real]])
self.y = np.concatenate((self.y.real, self.y.imag))
self.setup_fit_alpha_dep()
def setup_fit(self, confset, target_amplitudes, ref_site, multiplication=None, weightings=None):
self.ref_site = ref_site
self.fit_data = target_amplitudes.astype(self.machine._epsilon.dtype)
self.set_kernel_mat(confset, multiplication=multiplication)
self.setup_KtK(weightings=weightings)
if weightings is not None:
N = np.sum(weightings)
else:
N = len(self.fit_data)
self.N = _MPI_comm.allreduce(N)
def log_marg_lik(self):
if self.weightings is not None:
N = np.sum(self.weightings)
else:
N = len(self.fit_data)  # local number of data points
log_lik = -N * np.log(2*np.pi)
log_lik -= N * np.log(self.noise)
if self.weightings is None:
log_lik -= np.dot(self.fit_data.conj(), self.fit_data)/self.noise
else:
log_lik -= np.dot(self.fit_data.conj(), self.weightings*self.fit_data)/self.noise
log_lik = _MPI_comm.allreduce(log_lik)
if self.cholesky:
log_lik += 2*np.sum(np.log(np.diag(self.Sinv)))
else:
log_lik += np.linalg.slogdet(self.Sinv)[1]
log_lik += np.sum(np.log(self.alpha_mat[self.ref_site, :]))
weights = self.weights[self.active_elements]
log_lik += np.dot(weights.conj(), np.dot(self.KtK_alpha[np.ix_(self.active_elements, self.active_elements)], weights))
return 0.5*log_lik.real
def fit_step(self, confset, target_amplitudes, ref_site, opt_alpha=True, opt_noise=True, max_iterations=None, rvm=False,
multiplication=None, weightings=None):
self.setup_fit(confset, target_amplitudes, ref_site, multiplication=multiplication, weightings=weightings)
if opt_alpha or opt_noise:
alpha_old = self.alpha_mat[self.ref_site, :].copy()
noise_old = self.noise
converged = False
j = 0
if max_iterations is not None:
if j >= max_iterations:
converged = True
while not converged:
if self.cholesky:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*np.sum(abs(self.Sinv) ** 2, 0))
else:
gamma = (1 - (self.alpha_mat[self.ref_site, self.active_elements])*np.diag(self.Sinv).real)
if opt_alpha:
if rvm:
self.alpha_mat[self.ref_site, self.active_elements] = (gamma/((self.weights.conj()*self.weights)[self.active_elements])).real
else:
self.alpha_mat[self.ref_site, :] = ((np.sum(gamma)/(self.weights.conj().dot(self.weights))).real)
if opt_noise:
kern_mat = self.K
if self.complex_expand and self.machine.dtype==complex:
kern_mat = np.hstack((kern_mat, 1.j * kern_mat))
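# The original source is truncated at this point. The following lines are a
# hedged sketch of the usual evidence-approximation noise update and the
# convergence bookkeeping, mirroring QGPSLearningExp.opt_alpha above
# (assumption, not the verbatim original):
fit = kern_mat[:, self.active_elements].dot(self.weights[self.active_elements])
residual = _MPI_comm.allreduce(np.sum(abs(self.fit_data - fit)**2))
self.noise = (residual / max(self.N - np.sum(gamma), np.finfo(float).eps)).real
self.setup_fit_noise_dep()
j += 1
if np.sum(abs(self.alpha_mat[self.ref_site, :] - alpha_old)**2) < self.alpha_convergence_tol and abs(self.noise - noise_old) < self.noise_convergence_tol:
converged = True
np.copyto(alpha_old, self.alpha_mat[self.ref_site, :])
noise_old = self.noise
if max_iterations is not None and j >= max_iterations:
converged = True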
"""GaussianMLPMultitaskPolicy."""
import akro
import numpy as np
import tensorflow as tf
from metarl.tf.models import GaussianMLPModel
from metarl.tf.policies.multitask_policy import StochasticMultitaskPolicy
class GaussianMLPMultitaskPolicy(StochasticMultitaskPolicy):
"""GaussianMLPMultitaskPolicy.
Args:
env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.
embedding (metarl.tf.embeddings.Embedding): Embedding network.
task_space (akro.Box): Space of the task.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_output_nonlinearity (callable): Nonlinearity for output layer in
the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_parameterization (str): How the std should be parametrized. There
are a few options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
embedding,
task_space,
name='GaussianMLPMultitaskPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=None,
std_parameterization='exp',
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Box)
super().__init__(env_spec, embedding, task_space, name)
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.flat_dim
self.model = GaussianMLPModel(
output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
name='GaussianMLPModel')
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, self.obs_dim))
task_input = self._embedding.input
latent_input = tf.compat.v1.placeholder(
tf.float32, shape=(None, self._embedding.latent_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
with tf.compat.v1.variable_scope('concat_latent_obs'):
latent_state_input = tf.concat(
[latent_input, state_input], axis=-1)
self.model.build(latent_state_input, name='from_latent')
# Connect with embedding network's latent output
with tf.compat.v1.variable_scope('concat_embed_obs'):
latent_dist_info_sym = self._embedding.dist_info_sym(
task_input, name='dist_info_sym')
latent_var = self._embedding.distribution.sample_sym(
latent_dist_info_sym)
embed_state_input = tf.concat(
[latent_var, state_input], axis=-1)
self.model.build(embed_state_input, name='default')
self._f_dist_latent_obs = tf.compat.v1.get_default_session().make_callable(
[
self.model.networks['from_latent'].mean,
self.model.networks['from_latent'].log_std
],
feed_list=[latent_input, state_input])
self._f_dist_task_obs = tf.compat.v1.get_default_session().make_callable(
[
self.model.networks['default'].mean,
self.model.networks['default'].log_std,
self._embedding.latent_mean,
self._embedding.latent_std_param,
],
feed_list=[task_input, state_input])
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
(np.ndarray): Action sampled from the policy.
"""
flat_task_obs = self.task_observation_space.flatten(observation)
flat_task, flat_obs = self.split_observation(flat_task_obs)
(action_mean, action_log_std, latent_mean, latent_log_std) = self._f_dist_task_obs([flat_task], [flat_obs])
rnd = np.random.normal(size=action_mean.shape)
action_sample = rnd * np.exp(action_log_std) + action_mean
action_sample = self.action_space.unflatten(action_sample[0])
action_mean = self.action_space.unflatten(action_mean[0])
action_log_std = self.action_space.unflatten(action_log_std[0])
mean = self._embedding.latent_space.unflatten(latent_mean[0])
log_std = self._embedding.latent_space.unflatten(latent_log_std[0])
latent_info = dict(mean=mean, log_std=log_std)
return action_sample, dict(mean=action_mean, log_std=action_log_std, latent_info=latent_info)
def get_action_from_latent(self, latent, observation):
"""Get action sampled from the latent and observation.
Args:
latent (np.ndarray): Latent var from the policy.
observation (np.ndarray): Observation from the environment.
Returns:
(np.ndarray): Action sampled from the policy.
"""
flat_obs = self.observation_space.flatten(observation)
flat_latent = self.latent_space.flatten(latent)
mean, log_std = self._f_dist_latent_obs([flat_latent], [flat_obs])
rnd = np.random.normal(size=mean.shape)
sample = rnd * np.exp(log_std) + mean
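# The original source is truncated here; completion mirroring get_action above
# (assumption, not the verbatim original):
sample = self.action_space.unflatten(sample[0])
mean = self.action_space.unflatten(mean[0])
log_std = self.action_space.unflatten(log_std[0])
return sample, dict(mean=mean, log_std=log_std)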
import torch
import numpy as np
import utils.utils_geom
import utils.utils_basic
np.set_printoptions(suppress=True, precision=6, threshold=2000)
def merge_rt_py(r, t):
# r is 3 x 3
# t is 3 or maybe 3 x 1
t = np.reshape(t, [3, 1])
rt = np.concatenate((r,t), axis=1)
# rt is 3 x 4
br = np.reshape(np.array([0,0,0,1], np.float32), [1, 4])
# br is 1 x 4
rt = np.concatenate((rt, br), axis=0)
# rt is 4 x 4
return rt
def split_rt_py(rt):
r = rt[:3,:3]
t = rt[:3,3]
r = np.reshape(r, [3, 3])
t = np.reshape(t, [3, 1])
return r, t
def apply_4x4_py(rt, xyz):
# rt is 4 x 4
# xyz is N x 3
r, t = split_rt_py(rt)
xyz = np.transpose(xyz, [1, 0])
# xyz is 3 x N
xyz = np.dot(r, xyz)
# xyz is 3 x N
xyz = np.transpose(xyz, [1, 0])
# xyz is N x 3
t = np.reshape(t, [1, 3])
xyz = xyz + t
return xyz
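# Minimal self-check sketch for the helpers above (hypothetical values, not
# from the original source):
# R = np.eye(3, dtype=np.float32)
# t = np.array([1., 2., 3.], dtype=np.float32)
# rt = merge_rt_py(R, t)                        # 4 x 4
# xyz = np.random.rand(10, 3).astype(np.float32)
# assert np.allclose(apply_4x4_py(rt, xyz), xyz + t)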
def rigid_transform_3D(xyz_cam0, xyz_cam1, do_ransac=True):
xyz_cam0 = xyz_cam0.detach().cpu().numpy()
xyz_cam1 = xyz_cam1.detach().cpu().numpy()
cam1_T_cam0 = rigid_transform_3D_py(xyz_cam0, xyz_cam1, do_ransac=do_ransac)
cam1_T_cam0 = torch.from_numpy(cam1_T_cam0).float().to('cuda')
return cam1_T_cam0
def rigid_transform_3D_py_helper(xyz0, xyz1):
assert len(xyz0) == len(xyz1)
N = xyz0.shape[0] # total points
if N > 3:
centroid_xyz0 = np.mean(xyz0, axis=0)
centroid_xyz1 = np.mean(xyz1, axis=0)
# print('centroid_xyz0', centroid_xyz0)
# print('centroid_xyz1', centroid_xyz1)
# center the points
xyz0 = xyz0 - np.tile(centroid_xyz0, (N, 1))
xyz1 = xyz1 - np.tile(centroid_xyz1, (N, 1))
H = np.dot(xyz0.T, xyz1)
U, S, Vt = np.linalg.svd(H)
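# The original source is truncated here; a hedged completion of the standard
# Kabsch solve (assumption, not the verbatim original):
R = np.dot(Vt.T, U.T)
if np.linalg.det(R) < 0:
# special reflection case
Vt[2, :] *= -1
R = np.dot(Vt.T, U.T)
t = np.dot(-R, centroid_xyz0.T) + centroid_xyz1.T
else:  # pairs with "if N > 3" above
# too few points to fit; fall back to identity
R = np.eye(3, dtype=np.float32)
t = np.zeros(3, dtype=np.float32)
rt = merge_rt_py(R, t)
return rt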
__author__ = 'Chronis'
from pySLM.definitions import SLM
import numpy as np
from tkinter import _setit
import PIL
from astropy.io import fits
import pygame, os, time, pickle
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import threading
import matplotlib.image as mplimg
from matplotlib.colors import Normalize
from matplotlib import cm
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
def cart2pol(x,y):
"""
Takes cartesian (2D) coordinates and transforms them into polar.
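For example, cart2pol(1.0, 1.0) == (sqrt(2), pi/4).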
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return (rho, phi)
class dummyClass:
def __init__(self):
print('Dummy class')
self.maps = {'zero': np.zeros((1024, 768, 3))}
self.SLM_type = 'None'
self.pixelSize = 8
self.dimensions = (1024, 768, 3)
self.width = 1024
self.height = 768
self.size = (1024, 768)
class StdoutRedirector(object):
"""
Redirects all stdout to this object which then can be embeded into a text widget
"""
def __init__(self, text_widget):
self.text_space = text_widget
def write(self, string):
self.text_space.insert('end', string)
self.text_space.see('end')
def flush(self):
pass
def array2PIL(arr, size):
mode = 'RGBA'
arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])
if len(arr[0]) == 3:
arr = np.c_[arr, 255*np.ones((len(arr), 1), np.uint8)]
return PIL.Image.frombuffer(mode, size, arr.tobytes(), 'raw', mode, 0, 1)
class DropMenu:
"""
DropMenu is a widget that will contain various functionalities of a menu
"""
def __init__(self, master, window):
# Create dropdown menu
self.path = os.getcwd()
self.window = window
self.master = master
self.menu = Menu(self.master)
self.master.config(menu=self.menu)
# File Option************************************************
self.FileMenu = Menu(self.menu)
self.menu.add_cascade(label='File', menu=self.FileMenu)
self.FileMenu.add_command(label='Open phase map')
self.FileMenu.add_command(label='Save as FITS', command=lambda: self.save_fits())
self.FileMenu.add_command(label='Save weighting function')
self.FileMenu.add_separator()
self.FileMenu.add_command(label='Exit', command=self._quit)
# Settings option***********************************************
self.SettingsMenu = Menu(self.menu)
self.menu.add_cascade(label='Settings', menu=self.SettingsMenu)
self.SettingsMenu.add_command(label='Calibration curve', command=self.calibration_callback)
self.SettingsMenu.add_command(label='Star info', command=self.star_info_callback)
# Tools option**************************************************
self.ToolMenu = Menu(self.menu)
self.menu.add_cascade(label='Tools', menu=self.ToolMenu)
self.ToolMenu.add_command(label='Count')
self.ToolMenu.add_command(label='Histogram')
# Help option ********************************************
self.HelpMenu = Menu(self.menu)
self.menu.add_cascade(label='Help', menu=self.HelpMenu)
self.HelpMenu.add_command(label='Documentation')
self.HelpMenu.add_command(label='App Help')
# Variables **********************************************
try:
self.menu_data = pickle.load(open("SLM_data.p", 'rb'))
self.phase_curve = self.menu_data['phase curve']
except Exception:
file = filedialog.askopenfilename(title="Select phase curve(.npy)")
phase = np.load(file)
self.menu_data = {'phase curve': phase}
self.phase_curve = phase
pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
# take data point from phase curve and fit a polynomial such that each phase shift value in radians
# corresponds to a gray value. The inverse case gray->rad will just takes these data points
p = np.polyfit(self.phase_curve, np.arange(0, 256), deg=3)
self.rad_2_gray = np.poly1d(p)
# size of SLM pixel in microns (um)
self.slm_pxl = StringVar()
# variables for SLM characteristics and system setup used in Multiple stars
self.slm_pxl.set('36')
self.intensity = StringVar()
self.wavelength = StringVar()
self.Fnum = StringVar()
self.lD = StringVar()
self.lD.set('4')
def star_info_callback(self):
"""
Contains info about the optical bench and SLM
:return:
"""
toplevel_r = Toplevel()
toplevel_r.title('Star info')
toplevel_r.geometry("400x150+300+300")
toplevel = ttk.Frame(toplevel_r)
toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
self.wavelength.set('633')
wavelength_entry = Entry(toplevel, textvariable=self.wavelength,justify='center')
wavelength_lab = Label(toplevel, text='Wavelength (nm):')
self.Fnum.set('230')
Fnum_entry = Entry(toplevel, textvariable=self.Fnum, justify='center')
Fnum_lab = Label(toplevel, text='F # :')
self.intensity.set('1')
intensity_entry = Entry(toplevel, textvariable=self.intensity, justify='center')
intensity_lab = Label(toplevel, text='Intensity :')
"""As discussed, here are the correct parameters for the coordinates conversion in the SLM plane :
F# = 230
Pixel_size = 36 um
The spot size in the SLM plane right now is lambda*F# ~ 145 um ~ 4 pixels.
"""
slm_pxl_lab = Label(toplevel, text='SLM pixel size (um):', justify='center')
slm_pxl_entry = Entry(toplevel, textvariable=self.slm_pxl)
lD_lab = Label(toplevel, text='#pixels per l/D:')
lD_entry = Entry(toplevel, textvariable=self.lD)
separator = ttk.Separator(toplevel, orient=VERTICAL)
set_button = ttk.Button(toplevel, text='Set', command=self.apply_star_info)
wavelength_lab.grid(column=0, row=0)
wavelength_entry.grid(column=1, row=0)
Fnum_lab.grid(column=0, row=1)
Fnum_entry.grid(column=1, row=1)
intensity_lab.grid(column=0, row=2)
intensity_entry.grid(column=1, row=2)
separator.grid(column=2, row=0, rowspan=3, sticky=(N, S))
slm_pxl_lab.grid(column=3, row=0)
slm_pxl_entry.grid(column=3, row=1)
lD_lab.grid(column=3, row=2)
lD_entry.grid(column=3, row=3)
set_button.grid(column=0, row=4)
def apply_star_info(self):
pass
def calibration_callback(self):
"""
Plots the current phase response curve and allows to select a new one
:return:
"""
toplevel_r = Toplevel()
toplevel_r.title('Grayvalues calibration')
toplevel_r.geometry("300x300+300+300")
toplevel = ttk.Frame(toplevel_r)
toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
self.curve_plot, self.ax = plt.subplots(figsize=(3,3))
self.line = self.ax.plot(np.arange(256), self.phase_curve, 'o')
self.ax.set_xlim([-1, 260])
self.ax.set_xlabel("gray values")
self.ax.set_ylabel(r"Phase shift [$\pi$]")
data_plot = FigureCanvasTkAgg(self.curve_plot, master=toplevel)
data_plot.show()
import_curve_button = ttk.Button(toplevel, text='Import curve', command=self.import_curve_callback)
import_curve_button.grid(column=0, row=2)
data_plot.get_tk_widget().grid(column=1, row=2, columnspan=4, rowspan=4)
return
def import_curve_callback(self):
"""
Used for insertion of new phase curve calibration curve. Expects an numpy array of length 256 corresponding to
each grayvalue
:return:
"""
file = filedialog.askopenfilename(title="Select phase curve(.npy)")
phase = np.load(file)
self.menu_data = {'phase curve': phase}
self.phase_curve = phase
self.line[0].set_data(np.arange(256), phase)
plt.draw()
pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
return
def save_fits(self, name=None):
"""
Save current open phase mask as a FITS file with the center information
contained in the header
"""
file = filedialog.asksaveasfilename(master=self.master, title='Save as..', initialdir=self.path)
if file is None:
return
self.path = os.path.dirname(file)
file += '.fits'
# current = 0
if name is None:
current = self.window.maps_var.get()
else:
current = name
if current == '':
return
mask = self.window.SLM.maps[current]['data']
if self.window.active:
mask = self.window.image
hdu = fits.PrimaryHDU()
hdu.data = mask[:, :, 0]
hdu.header['center'] = str(self.window.center_position)
if len(self.window.center_position) > 1:
hdu.header['stars'] = str(self.window.multiple_star_position) + " ([l/D, azimuth])"
"""
if mask['star info']:
for k, val in mask['star info']:
hdu.header[k] = val
"""
hdu.header['DATE'] = time.strftime("%d/%m/%Y")
hdu.writeto(file)
return
def _quit(self):
self.window.SLM.quit()
self.master.quit() # stops mainloop
self.master.destroy()
return
class SLMViewer:
"""
Basic GUI that enables communication with SLM , on/off switch and
import/manipulation of phase maps
"""
def __init__(self, root):
self.master = Frame(root)
self.master.grid(column=0, row=0, sticky=(N, W, E, S))
root.title('SLM Controller')
try:
self.SLM = SLM()
print("SLM type is %s"%self.SLM.SLM_type)
except UserWarning:
self.SLM = dummyClass()
#raise UserWarning('No SLM connected.')
self.menu = DropMenu(root, self) # add drop-down menu
#self.SLM.pixelSize = int(self.menu.slm_pxl.get())
# =====================================================================================
# make canvas
self.off_image = np.zeros(self.SLM.dimensions)
self.image = self.off_image
self.fig, self.ax = plt.subplots()
self.norm = Normalize(vmin=0, vmax=255)
self.cmap = cm.gray
self.im = plt.imshow(self.image[:, :, 0].T, cmap=self.cmap, norm=self.norm)
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
# get image plot onto canvas and app
self.data_plot = FigureCanvasTkAgg(self.fig, master=self.master)
self.data_plot.get_tk_widget().configure(borderwidth=0)
self.fig.suptitle('SLM type : %s'%self.SLM.SLM_type, fontsize=12, fontweight='bold')
self.data_plot.show()
self.fig.canvas.mpl_connect('button_press_event', self.click_callback)
# ====================================================================================
# import phase maps frame
self.import_maps_frame = ttk.LabelFrame(self.master, text='Phase maps')
self.import_map_button = ttk.Button(self.import_maps_frame,
text='Import map', command=self.import_map_callback)
self.clear_list_button = ttk.Button(self.import_maps_frame, text='Clear', command=self.clear_maps)
self.maps_var = StringVar()
self.maps_var.set('')
if len(self.SLM.maps) > 0:
self.maps = [m for m in self.SLM.maps]
else:
self.maps = ['Zeros']
self.maps_options = OptionMenu(self.import_maps_frame, self.maps_var, *self.maps)
self.maps_options.grid(column=0, row=0)
self.import_map_button.grid(column=1, row=0)
self.clear_list_button.grid(column=1, row=1)
# ============================================================================================
# Set up center(s) position
# =============================================================================================
# default mouse position for center is center of SLM
self.mouse_coordinates = (int(self.SLM.width/2), int(self.SLM.height/2))
self.center_position = [[int(self.SLM.width/2), int(self.SLM.height/2)]]
self.plot_update()
self.center_step = 1
# =============================================================================================
# Phase mask activation/de-activation
# =============================================================================================
self.active_frame = LabelFrame(self.master, text='Activate')
self.active_var = StringVar()
self.active_var.set('OFF')
self.activation_button = Button(self.active_frame, textvariable=self.active_var,
command=self.activate, bg='firebrick2')
self.activation_button.grid(column=0, row=0)
self.active = False
# ==========================================================================================
# OPTIONS FRAME
# ==========================================================================================
self.notebook = ttk.Notebook(self.master)
self.fqpm_frame = Frame(self.notebook)
self.vortex_frame = Frame(self.notebook)
self.multiple_frame = Frame(self.notebook)
self.zernike_frame = Frame(self.notebook)
self.rotate_frame = Frame(self.notebook)
self.notebook.add(self.fqpm_frame, text='FQ/EO')
self.notebook.add(self.vortex_frame, text='Vortex')
self.notebook.add(self.multiple_frame, text='Multiple')
self.notebook.add(self.zernike_frame, text='Zernike')
self.notebook.add(self.rotate_frame, text='Phase shift')
self.notebook.grid()
# ===========================================================================================
# Star info in multiple star frame
# ===========================================================================================
self.stars_frame = ttk.LabelFrame(self.multiple_frame, text='Stars')
self.star_1 = Label(self.stars_frame, text='Star 1')
self.star_2 = Label(self.stars_frame, text='Star 2', state=DISABLED)
self.star_3 = Label(self.stars_frame, text='Star 3', state=DISABLED)
self.star_1.grid(column=0, row=1)
self.star_2.grid(column=0, row=2)
self.star_3.grid(column=0, row=3)
I_lab = ttk.Label(self.stars_frame, text='Intensity', width=10)
magn_lab = ttk.Label(self.stars_frame, text='Magnitude', width=10)
l_lab = ttk.Label(self.stars_frame, text='Wavelength(nm)', width=10)
F_lab = ttk.Label(self.stars_frame, text='F #', width=10)
lD_lab = ttk.Label(self.stars_frame, text='l/D', width=10)
phi_lab= ttk.Label(self.stars_frame, text='phi(pi)', width=10)
C_lab = ttk.Label(self.stars_frame, text='Center(x,y)', width=10)
magn_lab.grid(column=1, row=0)
I_lab.grid(column=2, row=0)
l_lab.grid(column=3, row=0)
F_lab.grid(column=4, row=0)
lD_lab.grid(column=5, row=0)
phi_lab.grid(column=6, row=0)
C_lab.grid(column=7, row=0)
# 1st star -- always visible
self.M1 = StringVar()
self.M1.set('0')
M1_entry = ttk.Entry(self.stars_frame, textvariable=self.M1, width=10)
M1_entry.grid(column=1, row=1)
self.I1_num = StringVar()
self.I1_num.set('1')
self.I1_entry = ttk.Entry(self.stars_frame, textvariable=self.I1_num, width=10)
self.I1_entry.grid(column=2, row=1)
self.l1_num = StringVar()
self.l1_num.set('633')
self.l1_entry = ttk.Entry(self.stars_frame, textvariable=self.l1_num, width=10)
self.l1_entry.grid(column=3, row=1)
self.F1_num = StringVar()
self.F1_num.set('230')
self.F1_entry = ttk.Entry(self.stars_frame, textvariable=self.F1_num, width=10)
self.F1_entry.grid(column=4, row=1)
self.starc1 = StringVar()
self.starc1.set('%i,%i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
self.center1_lab = Entry(self.stars_frame, textvariable=self.starc1, width=10)
self.center1_lab.grid(column=7, row=1)
# star 2
self.M2 = StringVar()
self.M2.set('0')
self.M2_entry = ttk.Entry(self.stars_frame, textvariable=self.M2,
width=10, state=DISABLED)
self.M2_entry.grid(column=1, row=2)
self.M2_entry.bind("<Return>", self.magnitude_to_intensity)
self.I2_num = StringVar()
self.I2_num.set('1')
self.I2_entry = ttk.Entry(self.stars_frame, textvariable=self.I2_num,
width=10, state=DISABLED)
self.I2_entry.bind("<Return>", self.magnitude_to_intensity)
self.I2_entry.grid(column=2, row=2)
self.l2_num = StringVar()
self.l2_num.set('633')
self.l2_entry = ttk.Entry(self.stars_frame, textvariable=self.l2_num,
width=10, state=DISABLED)
self.l2_entry.grid(column=3, row=2)
self.F2_num = StringVar()
self.F2_num.set('230')
self.F2_entry = ttk.Entry(self.stars_frame, textvariable=self.F2_num,
width=10, state=DISABLED)
self.F2_entry.grid(column=4, row=2)
self.starc2 = StringVar()
self.starc2.set('0,0')
self.lD_star2 = StringVar()
self.lD_star2.set('1')
self.lD_star2_entry = Entry(self.stars_frame, textvariable=self.lD_star2,
width=10, state=DISABLED)
self.lD_star2_entry.grid(column=5, row=2)
self.phi_star2 = StringVar()
self.phi_star2.set('0')
self.phi_star2_entry = Entry(self.stars_frame, textvariable=self.phi_star2,
width=10, state=DISABLED)
self.phi_star2_entry.grid(column=6, row=2)
self.center2_lab = Entry(self.stars_frame, textvariable=self.starc2,
width=10, state=DISABLED)
self.center2_lab.grid(column=7, row=2)
self.center2_lab.bind("<Return>", self.l_over_D_callback)
# star 3
self.M3 = StringVar()
self.M3.set('0')
self.M3_entry = ttk.Entry(self.stars_frame, textvariable=self.M3,
width=10, state=DISABLED)
self.M3_entry.grid(column=1, row=3)
self.M3_entry.bind("<Return>", self.magnitude_to_intensity)
self.I3_num = StringVar()
self.I3_num.set('1')
self.I3_entry = ttk.Entry(self.stars_frame, textvariable=self.I3_num,
width=10, state=DISABLED)
self.I3_entry.grid(column=2, row=3)
self.I3_entry.bind("<Return>", self.magnitude_to_intensity)
self.l3_num = StringVar()
self.l3_num.set('633')
self.l3_entry = ttk.Entry(self.stars_frame, textvariable=self.l3_num,
width=10, state=DISABLED)
self.l3_entry.grid(column=3, row=3)
self.F3_num = StringVar()
self.F3_num.set('230')
self.F3_entry = ttk.Entry(self.stars_frame, textvariable=self.F3_num,
width=10, state=DISABLED)
self.F3_entry.grid(column=4, row=3)
self.starc3 = StringVar()
self.starc3.set('0,0')
self.lD_star3 = StringVar()
self.lD_star3.set('1')
self.lD_star3_entry = Entry(self.stars_frame, textvariable=self.lD_star3,
width=10, state=DISABLED)
self.lD_star3_entry.grid(column=5, row=3)
self.phi_star3 = StringVar()
self.phi_star3.set('0')
self.phi_star3_entry = Entry(self.stars_frame, textvariable=self.phi_star3,
width=10, state=DISABLED)
self.phi_star3_entry.grid(column=6, row=3)
self.center3_lab = Entry(self.stars_frame, textvariable=self.starc3,
width=10, state=DISABLED)
self.center3_lab.grid(column=7, row=3)
self.center3_lab.bind("<Return>", self.l_over_D_callback)
# ============================================================================================
# FQPM and EOPM frame
# ============================================================================================
self.center1_lab_fqpm = Entry(self.fqpm_frame, textvariable=self.starc1)
self.center1_lab_fqpm.grid(column=4, row=0)
self.single_button = ttk.Button(self.fqpm_frame, text='Make map',
command=lambda: self.make_map('single'))
self.single_button.grid(column=0, row=0)
map_types = ['FQPM', 'EOPM', 'FLAT']
self.map_type_var = StringVar()
self.map_type_var.set('FQPM')
self.map_type_menu = OptionMenu(self.fqpm_frame, self.map_type_var, *map_types)
self.map_type_menu.grid(row=0, column=2)
# =========================================================================================================
# CONTROL FRAME
# =========================================================================================================
self.control_frame = ttk.LabelFrame(self.master, text='Center Controls')
self.cstep_var = StringVar()
self.cstep_var.set('1')
self.center_step_entry = Entry(self.control_frame, textvariable=self.cstep_var, justify='center')
self.center_step_entry.bind("<Return>", self.set_center_step)
self.center_control_up = ttk.Button(self.control_frame, text='^', command=lambda: self.center_move('up', 0))
self.center_control_down = ttk.Button(self.control_frame, text='v', command=lambda: self.center_move('down',0))
self.center_control_left = ttk.Button(self.control_frame, text='<', command=lambda: self.center_move('left',0))
self.center_control_right = ttk.Button(self.control_frame, text='>', command=lambda: self.center_move('right',0))
self.center_control_up.grid(column=1, row=0)
self.center_control_down.grid(column=1, row=2)
self.center_control_left.grid(column=0, row=1)
self.center_control_right.grid(column=2, row=1)
self.center_step_entry.grid(column=1, row=1)
self.center_num = ['1']
self.center_var = StringVar()
self.center_var.set('1')
# Set gray values
self.val_1 = 0
self.val_2 = 1
self.grayval_frame = ttk.LabelFrame(self.fqpm_frame, text='Gray values')
self.gray_1_val = StringVar()
self.gray_1_val.set('0')
self.gray_1_entry = Entry(self.grayval_frame, textvariable=self.gray_1_val, justify='center')
self.gray_1_entry.bind("<Return>", self.arrow_return)
self.gray_1_entry.bind("<Up>", self.arrow_return)
self.gray_1_entry.bind("<Down>", self.arrow_return)
self.gray_1_entry.bind("<Left>", self.arrow_return)
self.gray_1_entry.bind("<Right>", self.arrow_return)
self.gray_2_val = StringVar()
self.gray_2_val.set('0')
self.gray_2_entry = Entry(self.grayval_frame, textvariable=self.gray_2_val, justify='center')
self.gray_2_entry.bind("<Return>", self.arrow_return)
self.gray_2_entry.bind("<Up>", self.arrow_return)
self.gray_2_entry.bind("<Down>", self.arrow_return)
self.gray_2_entry.bind("<Left>", self.arrow_return)
self.gray_2_entry.bind("<Right>", self.arrow_return)
self.gray_1_lab = ttk.Label(self.grayval_frame, text='Gray-value 1')
self.gray_2_lab = ttk.Label(self.grayval_frame, text='Gray-value 2')
self.phase_1_val = StringVar()
self.phase_1_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_1_val.get())])
self.phase_2_val = StringVar()
self.phase_2_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_2_val.get())])
self.phase_1_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_1_val)
self.phase_2_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_2_val)
self.gray_1_lab.grid(column=0, row=0)
self.gray_2_lab.grid(column=0, row=1)
self.gray_1_entry.grid(column=1, row=0)
self.gray_2_entry.grid(column=1, row=1)
self.phase_1_lab.grid(column=2, row=0)
self.phase_2_lab.grid(column=2, row=1)
# ============================================================================================
# ZERNIKE TAB
# ============================================================================================
# implement various zernike terms which can be used to correct aberrations due to the SLM back-plate
#DEFOCUS
defocus_coeff_lab = ttk.Label(self.zernike_frame, text='Defocus:')
defocus_coeff_lab.grid(column=0, row=0)
self.defocus_coeff = DoubleVar()
self.defocus_coeff.set(0)
defocus_coeff_entry = Entry(self.zernike_frame, textvariable=self.defocus_coeff)
defocus_coeff_entry.grid(column=1, row=0)
#OBLIQUE ASTIGMATISM
astigm_coeff_lab = ttk.Label(self.zernike_frame, text='Obliq. Astigmatism:')
astigm_coeff_lab.grid(column=2, row=1)
self.astigm_coeff = DoubleVar()
self.astigm_coeff.set(0)
astigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.astigm_coeff)
astigm_coeff_entry.grid(column=3, row=1)
# VERTICAL ASTIGMATISM
secastigm_coeff_lab = ttk.Label(self.zernike_frame, text='Vert. Astigmatism:')
secastigm_coeff_lab.grid(column=0, row=1)
self.secastigm_coeff = DoubleVar()
self.secastigm_coeff.set(0)
secastigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.secastigm_coeff)
secastigm_coeff_entry.grid(column=1, row=1)
#TILT
tilt_coeff_lab = ttk.Label(self.zernike_frame, text='Tilt:')
tilt_coeff_lab.grid(column=2, row=2)
self.tilt_coeff = DoubleVar()
self.tilt_coeff.set(0)
tilt_coeff_entry = Entry(self.zernike_frame, textvariable=self.tilt_coeff)
tilt_coeff_entry.grid(column=3, row=2)
#TIP
tip_coeff_lab = ttk.Label(self.zernike_frame, text='Tip:')
tip_coeff_lab.grid(column=0, row=2)
self.tip_coeff = DoubleVar()
self.tip_coeff.set(0)
tip_coeff_entry = Entry(self.zernike_frame, textvariable=self.tip_coeff)
tip_coeff_entry.grid(column=1, row=2)
# X AND Y GRADIENTS
xgrad_coeff_lab = ttk.Label(self.zernike_frame, text='X gradient:')
xgrad_coeff_lab.grid(column=2, row=3)
self.xgrad_coeff = DoubleVar()
self.xgrad_coeff.set(0)
xgrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.xgrad_coeff)
xgrad_coeff_entry.grid(column=3, row=3)
ygrad_coeff_lab = ttk.Label(self.zernike_frame, text='Y gradient:')
ygrad_coeff_lab.grid(column=0, row=3)
self.ygrad_coeff = DoubleVar()
self.ygrad_coeff.set(0)
ygrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.ygrad_coeff)
ygrad_coeff_entry.grid(column=1, row=3)
# Phase shift of the zernike correction
zernike_range_lab = Label(self.zernike_frame, text='Phase shift of zernike')
zernike_range_lab.grid(column=0, row=4)
self.zernike_min = DoubleVar()
self.zernike_min.set(0)
zernike_min_entry = Entry(self.zernike_frame, textvariable=self.zernike_min)
zernike_min_entry.grid(column=1, row=4)
self.zernike_max = DoubleVar()
self.zernike_max.set(1)
zernike_max_entry = Entry(self.zernike_frame, textvariable=self.zernike_max)
zernike_max_entry.grid(column=2, row=4)
# Apply zernike corrections to the phase mask currently active or selected
apply_zernike = ttk.Button(self.zernike_frame, text='Apply', command=self.apply_zernike)
apply_zernike.grid(column=4, row=0)
# functions implementing the various zernike polynomials
self.Defocus = lambda r: np.sqrt(3)*(2*r**2)
self.Astigm = lambda r, theta: np.sqrt(6)*(r**2)*np.sin(2*theta)
self.VertAstigm = lambda r, theta: np.sqrt(6) * (r ** 2) * np.cos(2 * theta)
self.SecAstigm = lambda r, theta: np.sqrt(10)*(4*r**4 - 3*r**2)*np.sin(2*theta)
self.XGrad = lambda x: abs(x)
self.YGrad = lambda y: abs(y)
self.Tip = lambda r, theta: 2*r*np.cos(theta)
self.Tilt = lambda r, theta: 2*r*np.sin(theta)
# mesh grid used to create the 2d zernike polynomials in cartesian and polar coordinates
self.xx, self.yy = np.meshgrid(np.arange(-self.SLM.width/2, self.SLM.width/2),
np.arange(-self.SLM.height/2, self.SLM.height/2))
self.R, self.Theta = cart2pol(self.xx, self.yy)
# zernike_gray1_lab = Label(self.zernike_frame, text='Gray1')
# zernike_gray1_lab.grid(column=0, row=3)
# self.zernike_gray1 = IntVar()
# self.zernike_gray1.set(85)
# zernike_gray1_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray1)
# zernike_gray1_entry.grid(column=1, row=3)
#
# zernike_gray2_lab = Label(self.zernike_frame, text='Gray2')
# zernike_gray2_lab.grid(column=0, row=4)
# self.zernike_gray2 = IntVar()
# self.zernike_gray2.set(255)
# zernike_gray2_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray2)
# zernike_gray2_entry.grid(column=1, row=4)
self.zernike_gray1_old = 85
self.zernike_gray2_old = 255
# ======================================================================================
self.grayval_frame.grid(column=0, row=1, columnspan=5)
self.control_frame.grid(column=0, row=2, columnspan=5)
# ======================================================================================
# Multiple sources
# ======================================================================================
# Pack star center vars together for easy access
self.center_labels = [self.starc1, self.starc2, self.starc3]
# make frame where a binary star map or triple star map can be created
# binary phase map using airy pattern distribution for each star
self.binary_frame = ttk.Frame(self.multiple_frame)
self.binary_button = ttk.Button(self.binary_frame, text='Binary',
command=lambda: self.make_map('binary'), state=DISABLED)
self.binary_button.grid(column=1, row=1)
self.checkbox_val = IntVar()
binary_checkbox = Checkbutton(self.binary_frame, text='Save map', variable=self.checkbox_val)
binary_checkbox.grid(column=3, row=1)
self.tertiary_button = ttk.Button(self.binary_frame, text='Tertiary star',
command=lambda: self.make_map('triple'), state=DISABLED)
self.tertiary_button.grid(column=2, row=1)
self.new_map_name = StringVar()
self.new_map_name.set('Map name')
self.new_map_name_entry = Entry(self.binary_frame, textvariable=self.new_map_name)
self.new_map_name_entry_single = Entry(self.fqpm_frame, textvariable=self.new_map_name)
self.new_map_name_entry_single.grid(column=3, row=0)
self.new_map_name_entry.grid(column=0, row=1)
self.save_filetypes = [('Windows Bitmap', '*.bmp'), ('Text File', '*.txt'), ('Fits File', '*.fits')]
add_center_button = ttk.Button(self.binary_frame, text='Add', command=self.add_center)
add_center_button.grid(column=0, row=0)
self.centers_options = OptionMenu(self.binary_frame, self.center_var, *self.center_num)
self.centers_options.grid(column=1, row=0)
self.stars_frame.grid(column=0, row=0)
self.binary_frame.grid(column=0, row=2)
# =====================================================================================================
# Vortex tab
# =====================================================================================================
self.make_vortex = ttk.Button(self.vortex_frame, text='Make vortex',
command=lambda: self.make_map('vortex'))
self.make_vortex.grid(column=0, row=0)
# charge of the vortex
charge_lab = ttk.Label(self.vortex_frame, text='charge')
charge_lab.grid(column=2, row=1)
self.charge = IntVar()
self.charge.set(2)
self.charge_entry = Entry(self.vortex_frame, textvariable=self.charge, width=10)
self.charge_entry.bind("<Return>", self.charge_callback)
self.charge_entry.grid(column=3, row=1)
# coordinates entry
coordinates_lab = ttk.Label(self.vortex_frame, text='Coordinates')
coordinates_lab.grid(column=0, row=1)
self.vortex_coordinates = StringVar()
self.vortex_coordinates.set('%i, %i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
self.vortex_coordinates_entry = Entry(self.vortex_frame, textvariable=self.vortex_coordinates, width=10)
self.vortex_coordinates_entry.grid(column=1, row=1)
# label indicating gray values
gray_lab = ttk.Label(self.vortex_frame, text='Gray values')
gray_lab.grid(column=1, row=3, columnspan=2)
# gray value for the 0 pi phase
gray0_lab = ttk.Label(self.vortex_frame, text='0:', width=10)
gray0_lab.grid(column=0, row=4)
self.gray0 = IntVar()
self.gray0.set(0)
self.gray0_entry = Entry(self.vortex_frame, textvariable=self.gray0, width=10)
self.gray0_entry.grid(column=1, row=4)
# gray value for 2pi phase
gray2pi_lab = ttk.Label(self.vortex_frame, text='2pi:', width=10)
gray2pi_lab.grid(column=2, row=4)
self.gray2pi = IntVar()
self.gray2pi.set(0)
self.gray2pi_entry = Entry(self.vortex_frame, textvariable=self.gray2pi, width=10)
self.gray2pi_entry.grid(column=3, row=4)
# button to change gray values of vortex on the fly
self.gray_vortex_button = ttk.Button(self.vortex_frame, text='Change', command=self.vortex_change_grayvalues)
self.gray_vortex_button.grid(column=4, row=4)
# ============================================================================================================
# ZERNIKE WAVEFRONT SENSING
# ============================================================================================================
create_rotating_button = ttk.Button(self.rotate_frame, text='Create',
command=lambda: self.make_map('rotate'))
self.rotate_button = ttk.Button(self.rotate_frame, text='Rotate', command=self.rotate_callback, state=DISABLED)
self.rotating_var = StringVar()
self.rotating_var.set('0-pi')
self.rotating_label = ttk.Label(self.rotate_frame, textvariable=self.rotating_var, state=DISABLED)
self.rotating_list = ['0', 'pi/2', 'pi', '3pi/2']
self.whichZernike = 0
self.rotateZernike_dict = {}
lD_label = Label(self.rotate_frame, text="l/D", width=10)
self.lD_var = IntVar()
self.lD_var.set(10)
l_over_D_entry = ttk.Entry(self.rotate_frame, textvariable=self.lD_var, width=10)
create_rotating_button.grid(column=0, row=0)
self.rotate_button.grid(column=1, row=0)
self.rotating_label.grid(column=2, row=0)
lD_label.grid(column=1, row=1)
l_over_D_entry.grid(column=2, row=1)
# ======================================================================================================
self.multiple_star_position = []
# =============================================================================================================
# Text frame
# ========================================================================================================
self.text_frame = ttk.Frame(self.master)
scrollbar = Scrollbar(self.text_frame)
scrollbar.grid(column=4, row=0)
self.text = Text(self.text_frame, height=5, width=40, wrap='word', yscrollcommand=scrollbar.set)
self.text.insert(INSERT, "Initializing SLM..\n")
self.text.grid(column=0, row=0, columnspan=4)
sys.stdout = StdoutRedirector(self.text) # assign stdout to custom class
def rotating_mask(self):
"""
Create map with 1s in the center circle and 0 else
:return:
"""
if self.active:
self.image = np.zeros(self.SLM.dimensions, dtype=np.uint8)
self.active = False
self.activation_button.config(bg='firebrick2')
self.active_var.set('OFF')
m = np.zeros(self.SLM.size)
if self.lD_var.get() < 0:
return
m[np.where(self.R.T <= self.lD_var.get())] = 1
v0 = int(self.menu.rad_2_gray(0))
v1 = int(self.menu.rad_2_gray(0.5))
v2 = int(self.menu.rad_2_gray(1))
v3 = int(self.menu.rad_2_gray(1.5))
print(v0, v1, v2, v3)
# 0 - pi
        p0 = np.zeros(self.SLM.size)
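# ``cart2pol`` is used above (to build self.R and self.Theta) but is not
# defined in this excerpt; a minimal sketch of the assumed helper:
def cart2pol(x, y):
    """Convert cartesian meshgrids to polar coordinates (radius, angle)."""
    r = np.sqrt(x**2 + y**2)
    theta = np.arctan2(y, x)
    return r, theta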
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import enum
import errno
import json
import os
import pprint
import time
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import namedtuple
import numpy as np
import click
class GoldenOutputNotAvailableError(Exception):
pass
class NoCorrectnessDesired(Exception):
pass
class ExtrasNeeded(Exception):
def __init__(self, extras):
super(ExtrasNeeded, self).__init__(
'Missing needed packages for benchmark; to fix, pip install {}'.format(
' '.join(extras)))
self.extras = extras
class FrontendCommand(click.Command):
def __init__(self, networks, *args, **kwargs):
super(FrontendCommand, self).__init__(*args, **kwargs)
self.__networks = networks
def format_epilog(self, ctx, formatter):
with formatter.section('Supported Networks'):
formatter.write_text(', '.join(self.__networks))
class Precision(enum.Enum):
TRAINING = 0.2
INFERENCE = 5e-04
class StopWatch(object):
def __init__(self, use_callgrind):
self._start = None
self._stop = None
self._use_callgrind = use_callgrind
self._callgrind_active = False
self._total = 0.0
def start_outer(self):
# Like start(), but does not turn on callgrind.
self._start = time.time()
def start(self):
self._start = time.time()
if self._use_callgrind:
os.system('callgrind_control --instr=on {}'.format(os.getpid()))
self._callgrind_active = True
def stop(self):
if self._start is not None:
stop = time.time()
self._total += stop - self._start
self._start = None
if self._callgrind_active:
self._callgrind_active = False
os.system('callgrind_control --instr=off {}'.format(os.getpid()))
def elapsed(self):
return self._total
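# Example (illustrative): accumulate wall-clock time over several intervals:
#
#     watch = StopWatch(use_callgrind=False)
#     watch.start()
#     ...  # timed work
#     watch.stop()
#     total_seconds = watch.elapsed()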
class Output(object):
def __init__(self):
self.contents = None
self.precision = 'untested'
class Params(
namedtuple('Params', [
'batch_size', 'epochs', 'examples', 'warmups', 'network_name', 'backend_name',
'backend_opts', 'learn_phase'
])):
"""Parameters applied to a network during benchmarking."""
__slots__ = ()
@property
def epoch_size(self):
return self.examples // self.epochs
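# Example (illustrative names and values): epoch_size is derived from
# examples // epochs:
#
#     >>> p = Params(16, 4, 1024, 32, 'resnet50', 'plaid', None, None)
#     >>> p.epoch_size
#     256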
class ExplicitParamBuilder(object):
"""Builds Params for an explicit benchmark run."""
def __init__(self, batch_size, epochs, examples, warmups=32, learn_phase=None):
if not examples:
examples = 1024
self.params = Params(batch_size, epochs, examples, warmups, None, None, None, learn_phase)
def __call__(self, frontend, backend_name, network_names):
if not network_names:
raise click.UsageError('No networks specified; did you mean to add --blanket-run?')
for network_name in network_names:
params = self.params._replace(network_name=network_name, backend_name=backend_name)
yield params
class BlanketParamBuilder(object):
"""Builds Params for a blanket benchmark run."""
def __init__(self, epochs, learn_phase=None):
self.params = Params(None, epochs, 256, 32, None, None, None, learn_phase=learn_phase)
def __call__(self, frontend, backend_name, network_names):
if network_names:
raise click.UsageError(
'Networks specified with --blanket-run; choose one or the other')
for network_name in frontend.network_names:
for batch_size in frontend.blanket_batch_sizes:
params = self.params._replace(network_name=network_name,
batch_size=batch_size,
backend_name=backend_name)
yield params
class ConsoleReporter(object):
def __init__(self):
self.configuration = {}
def report(self, params, results, output):
print(results)
def complete(self):
pass
class ExplicitReporter(object):
"""Handles reports for an explicit benchmark run."""
def __init__(self, result_dir):
self.result_dir = result_dir
self.configuration = {}
def report(self, params, results, output):
try:
os.makedirs(self.result_dir)
except OSError as ex:
if ex.errno != errno.EEXIST:
click.echo(ex)
return
pprint.pprint(results)
with open(os.path.join(self.result_dir, 'result.json'), 'w') as out:
json.dump(results, out)
if isinstance(output, np.ndarray):
np.save(os.path.join(self.result_dir, 'result.npy'), output)
def complete(self):
pass
class BlanketReporter(object):
"""Handles reports for a blanket benchmark run."""
def __init__(self, result_dir):
self.result_dir = result_dir
self.outputs = {}
self.configuration = {}
self.configuration['frontend'] = None
self.configuration['backend'] = None
self.configuration['train'] = False
self.configuration['blanket_run'] = True
def report(self, params, results, output):
composite_str = ":".join(
[params.backend_name, params.network_name,
str(params.batch_size)])
self.outputs[composite_str] = {'results': dict(results)}
def complete(self):
self.outputs['run_configuration'] = self.configuration
try:
os.makedirs(self.result_dir)
except OSError as ex:
if ex.errno != errno.EEXIST:
click.echo(ex)
return
with open(
os.path.join(
self.result_dir, '{}-{}-report.json'.format(self.configuration['backend'],
self.configuration['frontend'])),
'w') as out:
json.dump(self.outputs, out, sort_keys=True, indent=2)
def _inner_run(reports,
frontend,
network_names,
params,
warmup,
callgrind,
print_stacktraces,
tile=None):
import plaidbench.cli as pb
model = frontend.model(params)
click.secho('Running {0} examples with {1}, batch size {2}, on backend {3}'.format(
params.examples, params.network_name, params.batch_size, params.backend_name),
fg='magenta')
benchmark_results = {}
model_output = None
if params.examples % params.batch_size != 0:
raise ValueError('The number of examples must be divisible by the batch size.')
try:
model.validate()
model.setup()
exec_stop_watch = StopWatch(callgrind)
compile_stop_watch = StopWatch(callgrind)
click.echo('Compiling network...', nl=False)
compile_stop_watch.start_outer()
model.compile()
compile_stop_watch.stop()
model_output, overrides = model.run(once=True)
if tile:
click.echo(' Saving Tile to {}...'.format(tile), nl=False)
model.model.predict_function._invoker.save(tile)
# Run a few more warmups -- this seems to improve the variability of the
# benchmark results.
if warmup:
click.echo(' Warming up...', nl=False)
model.run(warmup=True)
click.echo(' Running...')
exec_stop_watch.start_outer()
_, overrides = model.run()
exec_stop_watch.stop()
# Record stopwatch times
execution_duration = overrides.get('time', exec_stop_watch.elapsed())
exec_per_example = overrides.get('lastExecTimeInNS', execution_duration / params.examples)
compile_duration = compile_stop_watch.elapsed()
flops = overrides.get('flops', None)
gflops = None
if flops:
gflops = (flops / 10.0**9 / exec_per_example)
benchmark_results['GFLOP/s'] = gflops
benchmark_results['flops'] = flops
benchmark_results['compile_duration'] = compile_duration
benchmark_results['duration_per_example'] = exec_per_example
benchmark_results['tile_duration_per_example'] = exec_per_example
benchmark_results['examples'] = params.examples
benchmark_results['batch_size'] = params.batch_size
benchmark_results['model'] = params.network_name
benchmark_results['backend'] = params.backend_name
        resstr = 'Example finished, elapsed: {:.3f}s (compile), {:.3f}s (execution)'.format(
            compile_duration, execution_duration)
        if gflops:
            resstr += ', {:.2f} (GFLOP/s)'.format(gflops)
click.secho(resstr, fg='cyan', bold=True)
print(
"-----------------------------------------------------------------------------------------"
)
print("%-20s %-25s %-20s" % ("Network Name", "Inference Latency", "Time / FPS"))
print(
"-----------------------------------------------------------------------------------------"
)
print("%-20s %-25s %-20s" %
(params.network_name, "%.2f ms" % (exec_per_example * 1000), "%.2f ms / %.2f fps" %
(exec_per_example * 1000, 1.0 / exec_per_example)))
(golden_output, precision) = model.golden_output()
(correct, max_error, max_abs_error,
fail_ratio) = Runner._check_correctness(golden_output, model_output, precision.value)
benchmark_results['correct'] = correct
benchmark_results['max_error'] = float(max_error)
benchmark_results['max_abs_error'] = float(max_abs_error)
benchmark_results['fail_ratio'] = fail_ratio
if correct:
status = 'PASS'
else:
status = 'FAIL'
click.secho('Correctness: {}, max_error: {}, max_abs_error: {}, fail_ratio: {}'.format(
status, max_error, max_abs_error, fail_ratio),
fg='green' if status == 'PASS' else 'red')
except GoldenOutputNotAvailableError:
click.echo('Correctness: untested. Could not find golden data to compare against.')
except NoCorrectnessDesired:
pass
# Error handling
except Exception as ex:
# click.echo statements
click.echo(ex)
click.echo('Set --print-stacktraces to see the entire traceback')
# Record error
benchmark_results['exception'] = str(ex)
if print_stacktraces:
raise
finally:
reports.append((params, benchmark_results, model_output))
class Runner(object):
"""Runs an ML benchmark."""
def __init__(self, param_builder=ExplicitParamBuilder(1, 2, 1024), reporter=ConsoleReporter()):
"""Initializes the benchmark runner.
Args:
param_builder ((frontend, [str])->((Model, Params)...)): A callable that takes a
frontend and a list of network names, and returns a sequence of (Model, Params)
tuples describing the benchmarks to be run.
reporter (Reporter): Handles benchmark reports.
"""
self.verbose = False
self.result_dir = None
self.callgrind = False
self.param_builder = param_builder
self.print_stacktraces = False
self.reporter = reporter
self.warmup = True
self.timeout_secs = None
self.tile = None
def run(self, frontend, backend_name, network_names):
"""Runs a set of benchmarks.
        Args:
            frontend (Frontend): The interface to the ML frontend.
            backend_name (str): The name of the backend to run against.
            network_names ([str]): The names of the networks to benchmark.
        """
self.reporter.configuration['frontend'] = frontend.name
self.reporter.configuration['backend'] = backend_name
self.reporter.configuration['example_size'] = self.param_builder.params.examples
reports = []
try:
for params in self.param_builder(frontend, backend_name, network_names):
_inner_run(
reports,
frontend,
network_names,
params,
self.warmup,
self.callgrind,
self.print_stacktraces,
self.tile,
)
except KeyboardInterrupt:
click.secho("Aborting all runs...", fg="red")
finally:
# Reporter's gonna report
for report in reports:
self.reporter.report(*report)
self.reporter.complete()
return 0
@staticmethod
def _check_correctness(base_output, cur_output, precision):
# TODO: Parameterize relative and absolute error tolerance
correct = np.allclose(base_output, cur_output, rtol=precision, atol=1e-06)
# This duplicates allclose calculation for more detailed report
relative_error = ((precision * np.absolute(base_output - cur_output)) /
                          (1e-06 + precision * np.absolute(cur_output)))
"""
plotting.py
-----------
This module provides classes and functions for visualizing data and neural networks.
By: <NAME>, Ph.D., 2018
"""
# Compatibility imports
from __future__ import absolute_import, division, print_function
# 3rd party imports
import numpy as np
import tensorflow as tf
from biosppy.signals import ecg
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
from biosppy.signals.tools import filter_signal
def plot_class_activation_map(model, index, time_series, labels, fs):
"""
Plots one univariate time series
Parameters
----------
model : object
Active model with live session
index : int
time series id
time_series : np.array([m, length])
image array
labels : np.array([m,])
a 1D array of length m training examples containing class labels
fs : int
sample frequency
"""
# Label lookup
label_lookup = ['Normal Sinus Rhythm', 'Atrial Fibrillation', 'Other Rhythm']
# Get logits
logits = model.sess.run(
fetches=[model.graph.logits],
feed_dict={
model.graph.x: time_series[[index]],
model.graph.y: labels[[index]],
model.graph.is_training: False,
}
)
# Get output conv
conv = model.sess.run(
fetches=[model.graph.net],
feed_dict={
model.graph.x: time_series[[index]],
model.graph.y: labels[[index]],
model.graph.is_training: False,
}
)
# Get class activation map
cam = model.sess.run(get_class_map(conv[0], np.squeeze(np.argmax(logits))))
# cam = ((cam - cam.min()) / (cam.max() - cam.min()))
cam = cam[0, :, 0]
cam_time = np.arange(conv[0].shape[1]) / (conv[0].shape[1] / 60)
# Get non-zero-pad indices
non_zero_index = np.where(time_series[index, :, 0] != 0)[0]
# Get non-zero-pad waveform
time_series_filt = time_series[index, non_zero_index, 0]
time_series_filt_ts = np.arange(time_series_filt.shape[0]) * 1 / fs
# Linear interpolation
cam_time_intrp = np.arange(time_series[index].shape[0]) * 1 / fs
cam_intrp = np.interp(cam_time_intrp, cam_time, cam)
# Get non-zero-pad cam
cam_filt = cam_intrp[non_zero_index]
# Setup figure
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(wspace=0, hspace=0)
ax1 = plt.subplot2grid((2, 5), (0, 0), colspan=5)
ax2 = plt.subplot2grid((2, 5), (1, 0), colspan=5)
# ax3 = plt.subplot2grid((3, 5), (2, 1), colspan=3)
prob = model.sess.run(tf.nn.softmax(logits[0]))
# Set plot title
ax1.set_title(
'True Label: ' + label_lookup[np.squeeze(np.argmax(labels[index]))] + '\n' +
'Predicted Label: ' + label_lookup[np.squeeze(np.argmax(logits))] + '\n' +
'Normal Sinus Rhythm: ' + str(np.round(prob[0][0], 2)) +
' Atrial Fibrillation: ' + str(np.round(prob[0][1], 2)) +
' Other Rhythm: ' + str(np.round(prob[0][2], 2)),
fontsize=20, y=1.03
)
# Plot image
ax1.plot(time_series_filt_ts, time_series_filt, '-k', lw=1.5)
# Axes labels
ax1.set_ylabel('Normalized Amplitude', fontsize=22)
ax1.set_xlim([0, time_series_filt_ts.max()])
ax1.tick_params(labelbottom='off')
ax1.yaxis.set_tick_params(labelsize=16)
# Plot CAM
ax2.plot(time_series_filt_ts, cam_filt, '-k', lw=1.5)
# Axes labels
ax2.set_xlabel('Time, seconds', fontsize=22)
ax2.set_ylabel('Class Activation Map', fontsize=22)
ax2.set_xlim([0, time_series_filt_ts.max()])
# ax2.set_ylim([cam_filt.min()-0.05, cam_filt.max()+0.05])
ax2.set_ylim([-3, 35])
ax2.xaxis.set_tick_params(labelsize=16)
ax2.yaxis.set_tick_params(labelsize=16)
# # Get ECG object
# ecg_object = ecg.ecg(time_series_filt, sampling_rate=fs, show=False)
#
# # Get waveform templates
# templates, _ = _get_templates(time_series_filt, ecg_object['rpeaks'], 0.4, 0.6, fs)
#
# cam_filt, _, _ = filter_signal(signal=cam_filt,
# ftype='FIR',
# band='bandpass',
# order=int(0.3 * fs),
# frequency=[3, 100],
# sampling_rate=fs)
#
# # Get cam templates
# cam_templates, _ = _get_templates(cam_filt, ecg_object['rpeaks'], 0.4, 0.6, fs)
#
# ax3.plot(templates, '-', color=[0.7, 0.7, 0.7])
# ax3.plot(np.median(templates, axis=1), '-k')
#
# ax3.set_ylim([-0.5, 1.5])
#
# ax4 = ax3.twinx()
# ax4.plot(cam_templates, '-r', lw=0.25, alpha=0.5)
# ax4.plot(np.mean(cam_templates, axis=1), '-r')
#
# ax4.set_ylim([np.median(cam_templates, axis=1).min()-0.02, np.median(cam_templates, axis=1).max()+0.02])
plt.show()
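# ``get_class_map`` is called above but not defined in this excerpt. A minimal
# sketch of a 1-D class activation map is given below; the optional ``label_w``
# argument (final dense-layer weights) is an assumption, not the original API.
def get_class_map(conv, class_index, label_w=None):
    """Reduce conv feature maps [batch, time, channels] to a 1-D activation map."""
    if label_w is None:
        # without the dense-layer weights, fall back to an unweighted channel sum
        return tf.reduce_sum(conv, axis=-1, keepdims=True)
    # weight each channel by its contribution to the requested class logit
    return tf.reduce_sum(conv * label_w[:, class_index], axis=-1, keepdims=True)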
def _get_templates(waveform, rpeaks, before, after, fs):
# convert delimiters to samples
before = int(before * fs)
after = int(after * fs)
# Sort R-Peaks in ascending order
rpeaks = np.sort(rpeaks)
# Get number of sample points in waveform
length = len(waveform)
# Create empty list for templates
templates = []
# Create empty list for new rpeaks that match templates dimension
rpeaks_new = np.empty(0, dtype=int)
# Loop through R-Peaks
for rpeak in rpeaks:
# Before R-Peak
a = rpeak - before
if a < 0:
continue
# After R-Peak
b = rpeak + after
if b > length:
break
# Append template list
templates.append(waveform[a:b])
# Append new rpeaks list
        rpeaks_new = np.append(rpeaks_new, rpeak)
"""Module for calibrating the double spike composition."""
import numpy as np
from scipy.optimize import minimize
from .isodata import normalise_composition, realproptoratioprop, ratioproptorealprop
from .errors import calcratiocov
def spike_calibration(
isodata, spike_measurement, mixture_measurement, isoinv=None, standard=None
):
"""A simple least squares routine for calibrating a double spike from spike-standard mixtures.
Args:
isodata: object of class IsoData, e.g. IsoData('Fe')
spike_measurement (array): a matrix of beam intensities for direct measurements of
the spike. Columns correspond to the different isotopes e.g. for Fe, first
column is 54Fe, second is 56Fe, third is 57Fe, fourth is 58Fe. The matrix should
have the same number of columns as there are isotopes available.
mixture_measurement (array): a matrix of beam intensities for the measurements of
spike-standarard mixtures.
isoinv (array): the isotopes to use in the fitting, e.g [54, 56, 57, 58]. If
None this is read from isodata.
standard (array): standard composition. If None this is read from isodata.
Returns:
This routine estimates the spike composition given a direct measurement of the spike
and measurements of spike-standard mixtures. The routine minimises the chi-squared
misfit between the measurements and model, where measurements are weighted
according to the expected covariance given in isodata.errormodel['measured'].
Output is returned as a dictionary with the following fields:
calibrated_spike: the estimated spike composition
prop_mixture: the proportion of spike in the spike-sample mixtures
beta_mixture: the fractionation factors for the mixture measurements
beta_spike: the fractionation factors for the spike measurements
misfit: the chi-squared misfit
df: the degrees of freedom for the chi-squared statistic
"""
if isoinv is None:
if isodata.isoinv is None:
raise Exception("Inversion isotopes not specified.")
isoinv = isodata.isoinv
if standard is None:
standard = isodata.standard
# make sure working with numpy arrays
spike_measurement = np.array(spike_measurement)
mixture_measurement = np.array(mixture_measurement)
# make sure working with two dimensional arrays
if mixture_measurement.ndim == 1:
mixture_measurement = mixture_measurement[np.newaxis, :]
if spike_measurement.ndim == 1:
spike_measurement = spike_measurement[np.newaxis, :]
# normalise so have compositional vectors
spike_measurement = normalise_composition(spike_measurement)
mixture_measurement = normalise_composition(mixture_measurement)
# choose isotope to denominator by using largest isotope in spike
isoinv = isodata.isoindex(isoinv)
ix = np.argmax(spike_measurement[0, isoinv])
deno = isoinv[ix]
nume = isoinv[isoinv != deno]
isoinv = np.concatenate((np.array([deno]), nume))
invrat = isodata.invrat(isoinv)
An = isodata.ratio(standard, deno)
At = isodata.ratio(spike_measurement, deno)
Am = isodata.ratio(mixture_measurement, deno)
AP = np.log(isodata.ratio(isodata.mass, deno))
n_m = mixture_measurement.shape[0]
n_t = spike_measurement.shape[0]
emod_mixture = isodata.errormodel["measured"]
VAms = [
calcratiocov(mixture_measurement[i, :], emod_mixture, deno) for i in range(n_m)
]
emod_spike = isodata.errormodel["measured"]
VAts = [calcratiocov(spike_measurement[i, :], emod_spike, deno) for i in range(n_t)]
n = An[invrat]
P = AP[invrat]
t = At[:, invrat]
m = Am[:, invrat]
Vms = [V[np.ix_(invrat, invrat)] for V in VAms]
Vts = [V[np.ix_(invrat, invrat)] for V in VAts]
Sms = [np.linalg.inv(V) for V in Vms]
Sts = [np.linalg.inv(V) for V in Vts]
# form initial guess of model parameters. guess a 50-50 mix, with no fractionation
prop0 = 0.5
lambda0 = realproptoratioprop(prop0, At[0, :], An) * np.ones(m.shape[0])
beta0 = 0.0 * np.ones(m.shape[0])
betaT0 = 0.0 * np.ones(t.shape[0])
T0 = t[0, :]
z0 = np.concatenate((lambda0, beta0, betaT0, T0))
df = (t.shape[0] + m.shape[0]) * len(invrat) - len(z0) # degrees of freedom
res = minimize(
objective,
z0,
args=(m, t, P, n, Sms, Sts, n_m, n_t),
jac=True,
tol=1e-16,
options={"disp": False, "gtol": 1e-8, "eps": 1e-12},
)
z = res.x
misfit = res.fun
lambda_, beta, betat, T = z_to_params(z, P, n_m, n_t)
# Reconstruct spike vector
calibrated_spike = np.zeros_like(spike_measurement[0, :])
calibrated_spike[deno] = 1.0
calibrated_spike[nume] = T
# For isotopes that were not used in inversion, work out an expectation based on known betat
isonum = np.arange(isodata.nisos)
unused = np.array(list(set(isonum).difference(set(isoinv))))
if len(unused) > 0:
expected_spike_measurement = np.mean(spike_measurement, axis=0)
expected_betat = np.mean(betat)
expected_spike = expected_spike_measurement * np.exp(
-np.log(isodata.mass) * expected_betat
)
expected_spike = normalise_composition(expected_spike)
expected_unused = expected_spike[unused] / expected_spike[deno]
calibrated_spike[unused] = expected_unused
calibrated_spike = normalise_composition(calibrated_spike)
AT = isodata.ratio(calibrated_spike, deno)
prop = [ratioproptorealprop(lam, AT, An) for lam in lambda_]
out = {
"calibrated_spike": calibrated_spike,
"prop_mixture": prop,
"beta_mixture": beta,
"beta_spike": betat,
"misfit": misfit,
"df": df,
}
return out
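# Illustrative usage (IsoData and the measurement arrays are assumed to be
# supplied by the caller; the values below are placeholders, not real data):
#
#     isodata = IsoData('Fe')
#     out = spike_calibration(isodata, spike_measurement, mixture_measurement,
#                             isoinv=[54, 56, 57, 58])
#     print(out['calibrated_spike'], out['misfit'], out['df'])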
def objective(z, m, t, P, n, Wm, Wt, n_m, n_t):
"""The objective function and its Jacobian for the chi-squared minimization."""
me, te = mt_expected(z, P, n, n_m, n_t)
res_m = m - me
res_t = t - te
obs = []
for i in range(res_m.shape[0]):
rm = res_m[i, :][np.newaxis, :]
obs.append((rm @ Wm[i] @ rm.T)[0][0])
for i in range(res_t.shape[0]):
rt = res_t[i, :][np.newaxis, :]
obs.append((rt @ Wt[i] @ rt.T)[0][0])
ob = sum(obs)
dmdz, dtdz = dmt_expected_dz(z, P, n, n_m, n_t)
dob_dzs = []
for i in range(res_m.shape[0]):
rm = res_m[i, :][np.newaxis, :]
dmidz = dmdz[i, :, :]
dob_dzs.append(-(2 * rm @ Wm[i] @ dmidz)[0])
for i in range(res_t.shape[0]):
rt = res_t[i, :][np.newaxis, :]
dtidz = dtdz[i, :, :]
dob_dzs.append(-(2 * rt @ Wt[i] @ dtidz)[0])
    dob_dz = np.vstack(dob_dzs)
    # the total gradient is the sum of the per-residual contributions
    # (assumption: minimize is called with jac=True, so both the objective
    # value and its gradient must be returned)
    dob_dz = np.sum(dob_dz, axis=0)
    return ob, dob_dz
import gym
import os
import torch
import stable_baselines3
from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback
from stable_baselines3 import SAC
from particle import ParticleEnv, get_particle_env
from gym.wrappers import TimeLimit
import torch as th
import numpy as np
import matplotlib.pyplot as plt
import pickle as pkl
from collections import defaultdict, Counter
import h5py
from tqdm import tqdm
max_episode_steps = 100
def train(total_timesteps=int(5e4)):
eval_env = TimeLimit(ParticleEnv(), max_episode_steps)
env = TimeLimit(ParticleEnv(), max_episode_steps)
eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/best_model',
log_path='./logs/results', eval_freq=5000)
checkpoint_callback = CheckpointCallback(save_freq=5000, save_path='./logs/')
callback = CallbackList([checkpoint_callback, eval_callback])
model = SAC('MlpPolicy', env, tensorboard_log="./log", batch_size=1024)
model.learn(total_timesteps, callback=callback)
return model
def collect_offline_data_from_model(model):
replay_buffer = model.replay_buffer
unscale_action = model.policy.unscale_action
pos = replay_buffer.pos
# SAC of sb3 will scale action automatically, so un-scale it manually.
samples = {'observations': replay_buffer.observations[:pos].reshape(pos, -1),
'actions': unscale_action(replay_buffer.actions[:pos].reshape(pos, -1)),
'rewards': replay_buffer.rewards[:pos].reshape(pos),
'terminals': replay_buffer.dones[:pos].reshape(pos),
'timeouts': replay_buffer.timeouts[:pos].reshape(pos)}
return samples
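# Illustrative end-to-end usage (values are placeholders):
#
#     model = train(total_timesteps=50000)
#     samples = collect_offline_data_from_model(model)
#     print(samples['observations'].shape, samples['actions'].shape)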
def collect_offline_data(num=int(2e5), policy_path=None):
env = TimeLimit(ParticleEnv(), max_episode_steps)
episode_rewards = []
episode_reward = 0
obs = env.reset()
samples = defaultdict(list)
model = None
if policy_path is not None:
model = SAC.load(policy_path)
for t in tqdm(range(num)):
if model is None:
action = env.action_space.sample()
else:
action, state = model.predict(obs, deterministic=True)
next_obs, reward, done, _ = env.step(action)
episode_reward += reward
samples['observations'].append(obs)
samples['actions'].append(action)
samples['rewards'].append(reward)
samples['terminals'].append(float(done))
samples['timeouts'].append(float(0))
if done:
obs = env.reset()
episode_rewards.append(episode_reward)
episode_reward = 0
else:
obs = next_obs
np_samples = {}
for key in samples.keys():
np_samples[key] = np.array(samples[key])
return np_samples, min(episode_rewards), max(episode_rewards)
def collect_multi_offline_data(num=int(2e5), policy_path_dir=None, end_policy_epoch=40000):
policy_paths = os.listdir(policy_path_dir)
policy_paths = filter(lambda x: x.endswith("zip"), policy_paths)
policy_paths = list(filter(lambda x: int(x.split("_")[2]) <= end_policy_epoch, policy_paths))
num_pre_policy = int((num / len(policy_paths) / max_episode_steps) + 1) * max_episode_steps
episode_rewards = []
samples = defaultdict(list)
for policy_name in tqdm(policy_paths):
path = os.path.join(policy_path_dir, policy_name)
policy_samples, policy_min, policy_max = collect_offline_data(num_pre_policy, path)
episode_rewards.extend([policy_min, policy_max])
for key in policy_samples.keys():
samples[key].append(policy_samples[key])
np_samples = {}
for key in samples.keys():
        np_samples[key] = np.concatenate(samples[key])
    return np_samples, min(episode_rewards), max(episode_rewards)
# Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for various properties of covariance matrices as well as fidelity
calculations for Gaussian states.
"""
# pylint: disable=too-many-arguments
import numpy as np
from scipy.linalg import sqrtm
from ..symplectic import sympmat
def is_valid_cov(cov, hbar=2, rtol=1e-05, atol=1e-08):
r"""Checks if the covariance matrix is a valid quantum covariance matrix.
Args:
cov (array): a covariance matrix
hbar (float): value of hbar in the uncertainty relation
rtol (float): the relative tolerance parameter used in `np.allclose`
atol (float): the absolute tolerance parameter used in `np.allclose`
Returns:
(bool): whether the given covariance matrix is a valid covariance matrix
"""
(n, m) = cov.shape
if n != m:
# raise ValueError("The input matrix must be square")
return False
if not np.allclose(cov, np.transpose(cov), rtol=rtol, atol=atol):
# raise ValueError("The input matrix is not symmetric")
return False
    if n % 2 != 0:
        # raise ValueError("The input matrix must be of even dimension")
        return False
nmodes = n // 2
vals = np.linalg.eigvalsh(cov + 0.5j * hbar * sympmat(nmodes))
vals[np.abs(vals) < atol] = 0.0
    if np.all(vals >= 0):
        return True
    # otherwise cov + (i*hbar/2)*Omega has a negative eigenvalue, i.e. the
    # input matrix violates the uncertainty relation
    return False
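# Example: the single-mode vacuum covariance (hbar/2)*I saturates the
# uncertainty relation, so with hbar=2 the 2x2 identity is a valid covariance:
#
#     >>> is_valid_cov(np.identity(2), hbar=2)
#     True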
def is_pure_cov(cov, hbar=2, rtol=1e-05, atol=1e-08):
r"""Checks if the covariance matrix is a valid quantum covariance matrix
that corresponds to a quantum pure state
Args:
cov (array): a covariance matrix
hbar (float): value of hbar in the uncertainty relation
rtol (float): the relative tolerance parameter used in `np.allclose`
atol (float): the absolute tolerance parameter used in `np.allclose`
Returns:
(bool): whether the given covariance matrix corresponds to a pure state
"""
if is_valid_cov(cov, hbar=hbar, rtol=rtol, atol=atol):
purity = 1 / np.sqrt(np.linalg.det(2 * cov / hbar))
if np.allclose(purity, 1.0, rtol=rtol, atol=atol):
return True
return False
def is_classical_cov(cov, hbar=2, atol=1e-08):
r"""Checks if the covariance matrix can be efficiently sampled.
Args:
cov (array): a covariance matrix
hbar (float): value of hbar in the uncertainty relation
atol (float): the absolute tolerance parameter used in `np.allclose`
Returns:
(bool): whether the given covariance matrix corresponds to a classical state
"""
if is_valid_cov(cov, hbar=hbar, atol=atol):
(n, _) = cov.shape
vals = np.linalg.eigvalsh(cov - 0.5 * hbar * np.identity(n))
vals[np.abs(vals) < atol] = 0.0
if np.all(vals >= 0):
return True
return False
def fidelity(mu1, cov1, mu2, cov2, hbar=2, rtol=1e-05, atol=1e-08):
r"""Calculates the fidelity between two Gaussian quantum states.
For two pure states :math:`|\psi_1 \rangle, \ |\psi_2 \rangle`
the fidelity is given by :math:`|\langle \psi_1|\psi_2 \rangle|^2`
Note that if the covariance matrices correspond to pure states this
function reduces to the modulus square of the overlap of their state vectors.
For the derivation see `'Quantum Fidelity for Arbitrary Gaussian States', Banchi et al. <10.1103/PhysRevLett.115.260501>`_.
The actual implementation used here corresponds to the *square* of Eq. 96 of
`'Gaussian states and operations - a quick reference', Brask <https://arxiv.org/abs/2102.05748>`_.
Args:
mu1 (array): vector of means of the first state
cov1 (array): covariance matrix of the first state
mu2 (array): vector of means of the second state
cov2 (array): covariance matrix of the second state
hbar (float): value of hbar in the uncertainty relation
rtol (float): the relative tolerance parameter used in `np.allclose`
atol (float): the absolute tolerance parameter used in `np.allclose`
Returns:
(float): value of the fidelity between the two states
"""
n0, n1 = cov1.shape
m0, m1 = cov2.shape
    (l0,) = mu1.shape
    (l1,) = mu2.shape
if not n0 == n1 == m0 == m1 == l0 == l1:
raise ValueError("The inputs have incompatible shapes")
# We first convert all the inputs to quantities where hbar = 1
sigma1 = cov1 / hbar
sigma2 = cov2 / hbar
deltar = (mu1 - mu2) / np.sqrt(hbar)
omega = sympmat(n0 // 2) # The symplectic matrix
sigma = sigma1 + sigma2
sigma_inv = np.linalg.inv(sigma)
vaux = omega.T @ sigma_inv @ (0.25 * omega + sigma2 @ omega @ sigma1)
sqrtm_arg = np.identity(n0) + 0.25 * np.linalg.inv(vaux @ omega @ vaux @ omega)
# The sqrtm function has issues with matrices that are close to zero, hence we branch
if np.allclose(sqrtm_arg, 0, rtol=rtol, atol=atol):
mat_sqrtm = np.zeros_like(sqrtm_arg)
else:
mat_sqrtm = sqrtm(sqrtm_arg)
det_arg = 2 * (mat_sqrtm + np.identity(n0)) @ vaux
    f = np.sqrt(np.linalg.det(sigma_inv) * np.linalg.det(det_arg))
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import Bounds
__all__ = [
"MAD",
"SemiDeviation",
"VaR_Hist",
"CVaR_Hist",
"WR",
"LPM",
"Entropic_RM",
"EVaR_Hist",
"MDD_Abs",
"ADD_Abs",
"DaR_Abs",
"CDaR_Abs",
"EDaR_Abs",
"UCI_Abs",
"MDD_Rel",
"ADD_Rel",
"DaR_Rel",
"CDaR_Rel",
"EDaR_Rel",
"UCI_Rel",
"Sharpe_Risk",
"Sharpe",
"Risk_Contribution",
]
def MAD(X):
r"""
Calculate the Mean Absolute Deviation (MAD) of a returns series.
.. math::
\text{MAD}(X) = \frac{1}{T}\sum_{t=1}^{T}
| X_{t} - \mathbb{E}(X_{t}) |
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Returns
-------
value : float
MAD of a returns series.
Raises
------
ValueError
When the value cannot be calculated.
    Examples
    --------
    >>> MAD([1, -1, 3, -3])
    2.0
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.absolute(a - np.mean(a, axis=0)), axis=0)
value = np.array(value).item()
return value
def SemiDeviation(X):
r"""
Calculate the Semi Deviation of a returns series.
.. math::
\text{SemiDev}(X) = \left [ \frac{1}{T-1}\sum_{t=1}^{T}
(X_{t} - \mathbb{E}(X_{t}))^2 \right ]^{1/2}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
Semi Deviation of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
mu = np.mean(a, axis=0)
value = mu - a
n = value.shape[0]
value = np.sum(np.power(value[np.where(value >= 0)], 2)) / (n - 1)
value = np.power(value, 0.5).item()
return value
def VaR_Hist(X, alpha=0.05):
r"""
Calculate the Value at Risk (VaR) of a returns series.
.. math::
\text{VaR}_{\alpha}(X) = -\inf_{t \in (0,T)} \left \{ X_{t} \in
\mathbb{R}: F_{X}(X_{t})>\alpha \right \}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of VaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
VaR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
index = int(np.ceil(alpha * len(sorted_a)) - 1)
value = -sorted_a[index]
value = np.array(value).item()
return value
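# Example: for X = [0.05, 0.01, -0.02, -0.04] and alpha = 0.25, the sorted
# series is [-0.04, -0.02, 0.01, 0.05] and the selected index is 0, so
# VaR_Hist(X, alpha=0.25) returns 0.04 (a 4% loss).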
def CVaR_Hist(X, alpha=0.05):
r"""
Calculate the Conditional Value at Risk (CVaR) of a returns series.
.. math::
\text{CVaR}_{\alpha}(X) = \text{VaR}_{\alpha}(X) +
\frac{1}{\alpha T} \sum_{t=1}^{T} \max(-X_{t} -
\text{VaR}_{\alpha}(X), 0)
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of CVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
CVaR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
index = int(np.ceil(alpha * len(sorted_a)) - 1)
sum_var = 0
for i in range(0, index + 1):
sum_var = sum_var + sorted_a[i] - sorted_a[index]
value = -sorted_a[index] - sum_var / (alpha * len(sorted_a))
value = np.array(value).item()
return value
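# Example: for X = [0.05, 0.01, -0.02, -0.04] and alpha = 0.5, VaR_Hist gives
# 0.02 while CVaR_Hist gives 0.03 -- the mean loss at or beyond the VaR level
# is always at least the VaR itself.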
def WR(X):
r"""
Calculate the Worst Realization (WR) or Worst Scenario of a returns series.
.. math::
\text{WR}(X) = \max(-X)
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
WR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
value = -sorted_a[0]
value = np.array(value).item()
return value
def LPM(X, MAR=0, p=1):
r"""
Calculate the First or Second Lower Partial Moment of a returns series.
.. math::
\text{LPM}(X, \text{MAR}, 1) &= \frac{1}{T}\sum_{t=1}^{T}
\max(\text{MAR} - X_{t}, 0) \\
\text{LPM}(X, \text{MAR}, 2) &= \left [ \frac{1}{T-1}\sum_{t=1}^{T}
\max(\text{MAR} - X_{t}, 0)^{2} \right ]^{\frac{1}{2}} \\
Where:
:math:`\text{MAR}` is the minimum acceptable return.
:math:`p` is the order of the :math:`\text{LPM}`.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
MAR : float, optional
Minimum acceptable return. The default is 0.
p : float, optional can be {1,2}
order of the :math:`\text{LPM}`. The default is 1.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
p-th Lower Partial Moment of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
if p not in [1, 2]:
raise ValueError("p can only be 1 or 2")
value = MAR - a
if p == 2:
n = value.shape[0] - 1
else:
n = value.shape[0]
value = np.sum(np.power(value[np.where(value >= 0)], p)) / n
value = np.power(value, 1 / p).item()
return value
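# Example: LPM([0.03, -0.01, 0.02, -0.04], MAR=0, p=1) averages only the
# shortfalls below MAR over all T observations: (0.01 + 0.04) / 4 = 0.0125.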
def Entropic_RM(X, z=1, alpha=0.05):
r"""
Calculate the Entropic Risk Measure (ERM) of a returns series.
.. math::
\text{ERM}_{\alpha}(X) = z\ln \left (\frac{M_X(z^{-1})}{\alpha} \right )
Where:
:math:`M_X(z)` is the moment generating function of X.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
theta : float, optional
Risk aversion parameter, must be greater than zero. The default is 1.
alpha : float, optional
Significance level of EVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
ERM of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.exp(-1 / z * a), axis=0)
value = z * (np.log(value) + np.log(1 / alpha))
value = np.array(value).item()
return value
def _Entropic_RM(z, X, alpha=0.05):
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.exp(-1 / z * a), axis=0)
value = z * (np.log(value) + np.log(1 / alpha))
value = np.array(value).item()
return value
def EVaR_Hist(X, alpha=0.05):
r"""
Calculate the Entropic Value at Risk (EVaR) of a returns series.
.. math::
\text{EVaR}_{\alpha}(X) = \inf_{z>0} \left \{ z
\ln \left (\frac{M_X(z^{-1})}{\alpha} \right ) \right \}
Where:
:math:`M_X(t)` is the moment generating function of X.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of EVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
(value, z) : tuple
EVaR of a returns series and value of z that minimize EVaR.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
bnd = Bounds([1e-12], [np.inf])
result = minimize(
_Entropic_RM, [1], args=(X, alpha), method="SLSQP", bounds=bnd, tol=1e-12
)
t = result.x
t = t.item()
value = _Entropic_RM(t, X, alpha)
return (value, t)
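# Example: EVaR_Hist returns both the risk value and the minimizing z:
#
#     value, z = EVaR_Hist(X, alpha=0.05)
#
# At the same significance level EVaR is an upper bound on CVaR, which in
# turn is an upper bound on VaR.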
def MDD_Abs(X):
r"""
Calculate the Maximum Drawdown (MDD) of a returns series
using uncumpounded cumulative returns.
.. math::
\text{MDD}(X) = \max_{j \in (0,T)} \left [\max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i} \right ]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
MDD of an uncumpounded cumulative returns.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
value = 0
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD = peak - i
if DD > value:
value = DD
value = np.array(value).item()
return value
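# Example: for X = [0.02, -0.05, 0.03, -0.04] the uncompounded NAV path is
# [1, 1.02, 0.97, 1.00, 0.96]; the largest drop from the 1.02 peak is
# MDD_Abs(X) = 0.06.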
def ADD_Abs(X):
r"""
Calculate the Average Drawdown (ADD) of a returns series
using uncumpounded cumulative returns.
.. math::
\text{ADD}(X) = \frac{1}{T}\sum_{j=0}^{T}\left [ \max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i} \right ]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
ADD of an uncumpounded cumulative returns.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
value = 0
peak = -99999
n = 0
for i in NAV:
if i > peak:
peak = i
DD = peak - i
if DD > 0:
value += DD
n += 1
if n == 0:
value = 0
else:
value = value / (n - 1)
value = np.array(value).item()
return value
def DaR_Abs(X, alpha=0.05):
r"""
Calculate the Drawdown at Risk (DaR) of a returns series
using uncumpounded cumulative returns.
.. math::
\text{DaR}_{\alpha}(X) & = \max_{j \in (0,T)} \left \{ \text{DD}(X,j)
\in \mathbb{R}: F_{\text{DD}} \left ( \text{DD}(X,j) \right )< 1-\alpha
\right \} \\
\text{DD}(X,j) & = \max_{t \in (0,j)} \left ( \sum_{i=0}^{t}X_{i}
\right )- \sum_{i=0}^{j}X_{i}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of DaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
DaR of an uncumpounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i))
del DD[0]
sorted_DD = np.sort(np.array(DD), axis=0)
index = int(np.ceil(alpha * len(sorted_DD)) - 1)
value = -sorted_DD[index]
value = np.array(value).item()
return value
def CDaR_Abs(X, alpha=0.05):
r"""
Calculate the Conditional Drawdown at Risk (CDaR) of a returns series
using uncumpounded cumulative returns.
.. math::
\text{CDaR}_{\alpha}(X) = \text{DaR}_{\alpha}(X) + \frac{1}{\alpha T}
\sum_{j=0}^{T} \max \left [ \max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i}
- \text{DaR}_{\alpha}(X), 0 \right ]
Where:
:math:`\text{DaR}_{\alpha}` is the Drawdown at Risk of an uncumpound
cumulated return series :math:`X`.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of CDaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
CDaR of an uncumpounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
    prices = np.insert(np.array(a), 0, 1, axis=0)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import os
import sys
import six
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.fluid as fluid
from paddle.fluid.optimizer import Adam
import paddle.fluid.framework as framework
from test_imperative_base import new_program_scope
from paddle.optimizer.lr import LRScheduler
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
SEED = 10
IMAGE_SIZE = 784
CLASS_NUM = 10
if six.PY2:
LARGE_PARAM = 2**2
else:
LARGE_PARAM = 2**26
def random_batch_reader():
def _get_random_inputs_and_labels():
np.random.seed(SEED)
image = np.random.random([BATCH_SIZE, IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (
BATCH_SIZE,
1, )).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_inputs_and_labels()
batch_image = paddle.to_tensor(batch_image)
batch_label = paddle.to_tensor(batch_label)
yield batch_image, batch_label
return __reader__
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, x):
return self._linear(x)
class LayerWithLargeParameters(paddle.nn.Layer):
def __init__(self):
super(LayerWithLargeParameters, self).__init__()
self._l = paddle.nn.Linear(10, LARGE_PARAM)
def forward(self, x):
y = self._l(x)
return y
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
class TestSaveLoadLargeParameters(unittest.TestCase):
def setUp(self):
pass
def test_large_parameters_paddle_save(self):
# enable dygraph mode
paddle.disable_static()
# create network
layer = LayerWithLargeParameters()
save_dict = layer.state_dict()
path = os.path.join("test_paddle_save_load_large_param_save",
"layer.pdparams")
if six.PY2:
protocol = 2
else:
protocol = 4
paddle.save(save_dict, path, protocol=protocol)
dict_load = paddle.load(path)
# compare results before and after saving
for key, value in save_dict.items():
self.assertTrue(
np.array_equal(dict_load[key].numpy(), value.numpy()))
class TestSaveLoadPickle(unittest.TestCase):
def test_pickle_protocol(self):
# enable dygraph mode
paddle.disable_static()
# create network
layer = LinearNet()
save_dict = layer.state_dict()
path = os.path.join("test_paddle_save_load_pickle_protocol",
"layer.pdparams")
with self.assertRaises(ValueError):
paddle.save(save_dict, path, 2.0)
with self.assertRaises(ValueError):
paddle.save(save_dict, path, 1)
with self.assertRaises(ValueError):
paddle.save(save_dict, path, 5)
protocols = [2, ]
if sys.version_info.major >= 3 and sys.version_info.minor >= 4:
protocols += [3, 4]
for protocol in protocols:
paddle.save(save_dict, path, pickle_protocol=protocol)
dict_load = paddle.load(path)
# compare results before and after saving
for key, value in save_dict.items():
self.assertTrue(
np.array_equal(dict_load[key].numpy(), value.numpy()))
class TestSaveLoadAny(unittest.TestCase):
def set_zero(self, prog, place, scope=None):
if scope is None:
scope = fluid.global_scope()
for var in prog.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
ten = scope.find_var(var.name).get_tensor()
if ten is not None:
ten.set(np.zeros_like(np.array(ten)), place)
new_t = np.array(scope.find_var(var.name).get_tensor())
self.assertTrue(np.sum(np.abs(new_t)) == 0)
def replace_static_save(self, program, model_path, pickle_protocol=2):
with self.assertRaises(TypeError):
program.state_dict(1)
with self.assertRaises(TypeError):
program.state_dict(scope=1)
with self.assertRaises(ValueError):
program.state_dict('x')
state_dict_param = program.state_dict('param')
paddle.save(state_dict_param, model_path + '.pdparams')
state_dict_opt = program.state_dict('opt')
paddle.save(state_dict_opt, model_path + '.pdopt')
state_dict_all = program.state_dict()
        paddle.save(state_dict_all, model_path + '.pdall')
def replace_static_load(self, program, model_path):
with self.assertRaises(TypeError):
program.set_state_dict(1)
state_dict_param = paddle.load(model_path + '.pdparams')
state_dict_param['fake_var_name.@@'] = np.random.randn(1, 2)
state_dict_param['static_x'] = 'UserWarning'
program.set_state_dict(state_dict_param)
state_dict_param['static_x'] = np.random.randn(1, 2)
program.set_state_dict(state_dict_param)
program.set_state_dict(state_dict_param)
state_dict_opt = paddle.load(model_path + '.pdopt')
program.set_state_dict(state_dict_opt)
def test_replace_static_save_load(self):
paddle.enable_static()
with new_program_scope():
x = paddle.static.data(
name="static_x", shape=[None, IMAGE_SIZE], dtype='float32')
z = paddle.static.nn.fc(x, 10)
z = paddle.static.nn.fc(z, 10, bias_attr=False)
loss = fluid.layers.reduce_mean(z)
opt = Adam(learning_rate=1e-3)
opt.minimize(loss)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
prog = paddle.static.default_main_program()
fake_inputs = np.random.randn(2, IMAGE_SIZE).astype('float32')
exe.run(prog, feed={'static_x': fake_inputs}, fetch_list=[loss])
base_map = {}
for var in prog.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
t = np.array(fluid.global_scope().find_var(var.name)
.get_tensor())
base_map[var.name] = t
path = os.path.join("test_replace_static_save_load", "model")
# paddle.save, legacy paddle.fluid.load
self.replace_static_save(prog, path)
self.set_zero(prog, place)
paddle.fluid.io.load(prog, path)
for var in prog.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
new_t = np.array(fluid.global_scope().find_var(var.name)
.get_tensor())
base_t = base_map[var.name]
self.assertTrue(np.array_equal(new_t, np.array(base_t)))
# legacy paddle.fluid.save, paddle.load
paddle.fluid.io.save(prog, path)
self.set_zero(prog, place)
self.replace_static_load(prog, path)
for var in prog.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
new_t = np.array(fluid.global_scope().find_var(var.name)
.get_tensor())
base_t = base_map[var.name]
self.assertTrue(np.array_equal(new_t, base_t))
# test for return tensor
path_vars = 'test_replace_save_load_return_tensor_static/model'
for var in prog.list_vars():
if var.persistable:
tensor = var.get_value(fluid.global_scope())
paddle.save(tensor, os.path.join(path_vars, var.name))
with self.assertRaises(TypeError):
var.get_value('fluid.global_scope()')
with self.assertRaises(ValueError):
x.get_value()
with self.assertRaises(TypeError):
x.set_value('1')
fake_data = np.zeros([3, 2, 1, 2, 3])
with self.assertRaises(TypeError):
x.set_value(fake_data, '1')
with self.assertRaises(ValueError):
x.set_value(fake_data)
with self.assertRaises(ValueError):
var.set_value(fake_data)
# set var to zero
self.set_zero(prog, place)
for var in prog.list_vars():
if var.persistable:
tensor = paddle.load(
os.path.join(path_vars, var.name), return_numpy=False)
var.set_value(tensor)
new_t = np.array(fluid.global_scope().find_var(var.name)
.get_tensor())
base_t = base_map[var.name]
self.assertTrue(np.array_equal(new_t, base_t))
def test_paddle_save_load_v2(self):
paddle.disable_static()
class StepDecay(LRScheduler):
def __init__(self,
learning_rate,
step_size,
gamma=0.1,
last_epoch=-1,
verbose=False):
self.step_size = step_size
self.gamma = gamma
super(StepDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
i = self.last_epoch // self.step_size
return self.base_lr * (self.gamma**i)
layer = LinearNet()
inps = paddle.randn([2, IMAGE_SIZE])
adam = opt.Adam(
learning_rate=StepDecay(0.1, 1), parameters=layer.parameters())
y = layer(inps)
y.mean().backward()
adam.step()
state_dict = adam.state_dict()
path = 'paddle_save_load_v2/model.pdparams'
with self.assertRaises(TypeError):
paddle.save(state_dict, path, use_binary_format='False')
# legacy paddle.save, paddle.load
paddle.framework.io._legacy_save(state_dict, path)
load_dict_tensor = paddle.load(path, return_numpy=False)
# legacy paddle.load, paddle.save
paddle.save(state_dict, path)
load_dict_np = paddle.framework.io._legacy_load(path)
for k, v in state_dict.items():
if isinstance(v, dict):
self.assertTrue(v == load_dict_tensor[k])
else:
self.assertTrue(
np.array_equal(v.numpy(), load_dict_tensor[k].numpy()))
if not np.array_equal(v.numpy(), load_dict_np[k]):
print(v.numpy())
print(load_dict_np[k])
self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k]))
def test_single_pickle_var_dygraph(self):
# enable dygraph mode
paddle.disable_static()
layer = LinearNet()
path = 'paddle_save_load_v2/var_dygraph'
tensor = layer._linear.weight
with self.assertRaises(ValueError):
paddle.save(tensor, path, pickle_protocol='3')
with self.assertRaises(ValueError):
paddle.save(tensor, path, pickle_protocol=5)
paddle.save(tensor, path)
t_dygraph = paddle.load(path)
np_dygraph = paddle.load(path, return_numpy=True)
self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
paddle.enable_static()
lod_static = paddle.load(path)
np_static = paddle.load(path, return_numpy=True)
self.assertTrue(isinstance(lod_static, paddle.fluid.core.LoDTensor))
self.assertTrue(np.array_equal(tensor.numpy(), np_static))
self.assertTrue(np.array_equal(tensor.numpy(), np.array(lod_static)))
def test_single_pickle_var_static(self):
# enable static mode
paddle.enable_static()
with new_program_scope():
# create network
x = paddle.static.data(
name="x", shape=[None, IMAGE_SIZE], dtype='float32')
z = paddle.static.nn.fc(x, 128)
loss = fluid.layers.reduce_mean(z)
place = fluid.CPUPlace(
) if not paddle.fluid.core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
prog = paddle.static.default_main_program()
for var in prog.list_vars():
if list(var.shape) == [IMAGE_SIZE, 128]:
tensor = var.get_value()
break
scope = fluid.global_scope()
origin_tensor = np.array(tensor)
path = 'test_single_pickle_var_static/var'
paddle.save(tensor, path)
self.set_zero(prog, place, scope)
# static load
lod_static = paddle.load(path)
np_static = paddle.load(path, return_numpy=True)
# set_tensor(np.ndarray)
var.set_value(np_static, scope)
self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
# set_tensor(LoDTensor)
self.set_zero(prog, place, scope)
var.set_value(lod_static, scope)
self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
# enable dygraph mode
paddle.disable_static()
var_dygraph = paddle.load(path)
np_dygraph = paddle.load(path, return_numpy=True)
self.assertTrue(np.array_equal(np.array(tensor), np_dygraph))
self.assertTrue(np.array_equal(np.array(tensor), var_dygraph.numpy()))
def test_dygraph_save_static_load(self):
inps = np.random.randn(1, IMAGE_SIZE).astype('float32')
path = 'test_dygraph_save_static_load/dy-static.pdparams'
paddle.disable_static()
with paddle.utils.unique_name.guard():
layer = LinearNet()
state_dict_dy = layer.state_dict()
paddle.save(state_dict_dy, path)
paddle.enable_static()
with new_program_scope():
layer = LinearNet()
data = paddle.static.data(
name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32')
y_static = layer(data)
program = paddle.static.default_main_program()
place = fluid.CPUPlace(
) if not paddle.fluid.core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
state_dict = paddle.load(path, keep_name_table=True)
program.set_state_dict(state_dict)
state_dict_param = program.state_dict("param")
for name, tensor in state_dict_dy.items():
self.assertTrue(
np.array_equal(tensor.numpy(),
np.array(state_dict_param[tensor.name])))
def test_save_load_complex_object_dygraph_save(self):
paddle.disable_static()
layer = paddle.nn.Linear(3, 4)
state_dict = layer.state_dict()
obj1 = [
paddle.randn(
                [3, 4], dtype='float32'), np.random.randn(5, 6)
        ]
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# Based on [ryanvolz's Ambiguity Function](https://gist.github.com/ryanvolz/8b0d9f3e48ec8ddcef4d)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def ambiguity(code, nfreq=1):
"""Calculate the ambiguity function of code for nfreq frequencies.
The ambiguity function is the square of the autocorrelation,
normalized so the peak value is 1.
For correct results, we require that nfreq >= len(code).
The result is a 2-D array with the first index corresponding
to frequency shift. The code is frequency shifted by
normalized frequencies of range(nfreq)/nfreq and correlated
with the baseband code. The result amb[0] gives the
ambiguity with 0 frequency shift, amb[1] with 1/nfreq
frequency shift, etc. These frequencies are the same as (and
are in the same order as) the FFT frequencies for an nfreq-
length FFT.
****Thus, the peak value is at amb[0, len(code) - 1]****
To relocate the peak to the middle of the result, use
np.fft.fftshift(amb, axes=0)
To relocate the peak to the [0, 0] entry, use
np.fft.ifftshift(amb, axes=1)
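    Example (a minimal doctest-style sketch):
    >>> barker13 = np.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1],
    ...                     np.complex_)
    >>> amb = ambiguity(barker13, nfreq=16)
    >>> amb.shape
    (16, 25)
    >>> round(float(amb[0, 12]), 6)
    1.0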
"""
inlen = len(code)
outlen = 2*inlen - 1
#if nfreq < inlen:
# nfreq = inlen
# Doppler shift the code to form a correlation bank in the form of a matrix
doppleridx = np.arange(nfreq)[:, np.newaxis]*np.arange(inlen)
dopplermat = np.exp(2*np.pi*1j*doppleridx/nfreq)
# code is conjugated to form matched correlation
codebank = code.conj()*dopplermat
# initialize the output autocorrelation array
acorr = np.zeros((nfreq, outlen), np.complex_)
# correlate the Doppler-shifted codes with the original code
# to get autocorrelation
for k, shifted_code in enumerate(codebank):
acorr[k] = np.correlate(code, shifted_code, mode='full')
# calculate ambiguity function as normalized square magnitude of autocorrelation
# (index of peak value is [0, inlen - 1])
amb = np.abs(acorr / acorr[0, inlen - 1])**2
return amb
def plotamb(code, channels, tone, window, rate):
def update(frame_number):
barker13 = np.asarray(code[0], np.complex)*mixer_sin
b13amb = ambiguity(barker13, window)
im.set_data(np.fft.fftshift(b13amb, axes=0).T)
return im
def init():
barker13 = np.ones(L, np.complex)
b13amb = ambiguity(barker13, window)
im.set_data(a*np.fft.fftshift(b13amb, axes=0).T)
return im
fig = plt.figure()
plt.xlabel('Frequency Index')
plt.ylabel('Delay Index')
    barker13 = np.asarray(code[0], np.complex)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 13:08:11 2019
@author: logiusti
"""
import pickle
import numpy as np
import pandas as pd
from sklearn import metrics
from astropy.convolution import Gaussian1DKernel,convolve
from multiprocessing.dummy import Pool as ThreadPool
from scipy.spatial import ConvexHull, distance_matrix
import Loader
from Utility import timeit
import json
import os
class Solver:
r"""
    Class which provides all the functionality to do math on our data
"""
def __init__(self):
self.loader = Loader.Loader(r"C:\Users\logiusti\Lorenzo\Data\ups")
self.loader.set_all_public_variables()
def get_all_pof(self, df):
def sigmoid(x):
"""
            TODO: parametrize the sigmoid shift (currently hard-coded as -x+10).
"""
return 1/(1+np.exp(-x+10))
def get_one_pof(p0, p, eta, clicks):
"""
            TODO: parametrize the weights and exponents used below.
"""
distance = 1-(1/(1+np.linalg.norm(p0-p, 1)))
pof_eta_load = sigmoid(0.75*eta**.5 + 0.6*clicks**.5)
pof = distance*pof_eta_load**.5
return pof
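        # A worked example (hypothetical numbers): if p equals the reference
        # point p0, distance = 0 and pof = 0 regardless of age or clicks; as
        # the L1 distance grows, distance -> 1 and pof approaches
        # sqrt(sigmoid(0.75*eta**.5 + 0.6*clicks**.5)).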
def get_p0_name():
# test points
pts = df[[2, 3, 4]].to_numpy()
            # two points which are furthest apart will occur as vertices of the convex hull
candidates = pts[ConvexHull(pts).vertices]
# get distances between each pair of candidate points
dist_mat = distance_matrix(candidates, candidates)
# get indices of candidates that are furthest apart
i, j = np.unravel_index(dist_mat.argmax(), dist_mat.shape)
            #get the rows of the df corresponding to the two most distant points
tmp_df = df[(df[[2, 3, 4]].to_numpy() == candidates[j]) |
(df[[2, 3, 4]].to_numpy() == candidates[i])]
            #return the one with the lowest clicks and age
return tmp_df.assign(f=tmp_df['eta']**2 * tmp_df['clicks']**2)\
.sort_values('f')\
.drop('f', axis=1)\
.iloc[0]['UPS']
v = []
p0 = df.loc[df['UPS'] == get_p0_name()][[2, 3, 4]].to_numpy()
for _, row in df.iterrows():
            p = np.array([row[2], row[3], row[4]])
"""
infinite_fourier.py
====================================
Module for calculating :math:`C_I` using linear algebra,
based on components generated by the method of recursive images
using fourier series to obtain :math:`V_{I\infty}`
"""
import scipy
import scipy.special
import scipy.integrate  # used by get_C_int_Ex
import numpy as np
import numpy.linalg
eps0= 8.854187817*10**-12
class layer:
def __init__(self,eps_x,eps_y,t):
"""
layer class
Parameters
----------
eps_x : float
relative dielectric constant in the x-direction
        eps_y : float
            relative dielectric constant in the y-direction
t : float
thickness of layer
"""
self.eps_x=eps_x
self.eps_y=eps_y
if self.eps_y>0:
self.eps_r=(self.eps_x/self.eps_y)**0.5
else:
self.eps_r=1
self.t=t
self.t_eff=t*self.eps_r
self.interface_pos_dir=None
self.interface_neg_dir=None
class interface:
"""
Interface class
Parameters
----------
layer_pos_dir : layer object
The layer in positive y-direction
layer_neg_dir : layer object
The layer in negative y-direction
y : float
y-coordinate of the layer
max_fourier_n : int
        max number of Fourier components in the model
"""
def __init__(self,layer_pos_dir,layer_neg_dir,y,max_fourier_n):
self.layer_pos_dir=layer_pos_dir
self.layer_neg_dir=layer_neg_dir
# calculate regular (not effective) r and t
eps1=np.sqrt(layer_pos_dir.eps_y*layer_pos_dir.eps_x)
eps2=np.sqrt(layer_neg_dir.eps_y*layer_neg_dir.eps_x)
self.r_pos_dir=(eps2-eps1)/(eps1+eps2)
self.t_pos_dir=self.r_pos_dir+1
self.r_neg_dir=-self.r_pos_dir
self.t_neg_dir=self.r_neg_dir+1
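        # e.g. vacuum above (eps1 = 1) and a dielectric with eps2 = 9 below
        # gives r_pos_dir = (9-1)/(1+9) = 0.8 and t_pos_dir = 1.8 -- the
        # standard reflection/transmission coefficients for the potential.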
self.y=y
# set all effective coefficients to None, are calculated when the get functions are called
self.r_pos_dir_eff=np.full((max_fourier_n), None)
self.t_pos_dir_eff=np.full((max_fourier_n), None)
self.r_neg_dir_eff=np.full((max_fourier_n), None)
self.t_neg_dir_eff=np.full((max_fourier_n), None)
def get_r_pos_dir_eff(self,n):
"""
        Calculate :math:`r_{eff}` in the positive direction
Parameters
----------
n : int
            Fourier component
"""
if self.r_pos_dir_eff[n]==None:
if self.layer_pos_dir.interface_pos_dir==None:
# if this is the last material, it has infinite extension, and it is easy
self.r_pos_dir_eff[n]=self.r_pos_dir
self.t_pos_dir_eff[n]=self.t_pos_dir
else:
# if this is not the last material, calculate effective coefficients r and t
rup=self.layer_pos_dir.interface_pos_dir.get_r_pos_dir_eff(n)
t=self.layer_pos_dir.t_eff
exp=np.exp(-np.pi*2*t*(2*n+1))
factor=1/(1-self.r_neg_dir*rup*exp) #== sum {m=0->inf} (self.r_neg_dir*rup*exp)^m
self.r_pos_dir_eff[n]=self.r_pos_dir+self.t_pos_dir*rup*self.t_neg_dir*exp*factor
self.t_pos_dir_eff[n]=self.t_pos_dir+self.t_pos_dir*rup*self.r_neg_dir*exp*factor
return self.r_pos_dir_eff[n]
def get_t_pos_dir_eff(self,n):
"""
        Calculate :math:`t_{eff}` in the positive direction
Parameters
----------
n : int
            Fourier component
"""
if self.t_pos_dir_eff[n]==None:
self.get_r_pos_dir_eff(n)
return self.t_pos_dir_eff[n]
def get_r_neg_dir_eff(self,n):
"""
        Calculate :math:`r_{eff}` in the negative direction
Parameters
----------
n : int
            Fourier component
"""
if self.r_neg_dir_eff[n]==None:
if self.layer_neg_dir.interface_neg_dir==None:
# if this is the first material, it has infinite extension, and it is easy
self.r_neg_dir_eff[n]=self.r_neg_dir
self.t_neg_dir_eff[n]=self.t_neg_dir
else:
# if this is not the last material, calculate effective coefficients r and t
rne=self.layer_neg_dir.interface_neg_dir.get_r_neg_dir_eff(n)
t=self.layer_neg_dir.t_eff
exp=np.exp(-np.pi*2*t*(2*n+1))
factor=1/(1-self.r_pos_dir*rne*exp)
self.r_neg_dir_eff[n]=self.r_neg_dir+self.t_neg_dir*rne*self.t_pos_dir*exp*factor
self.t_neg_dir_eff[n]=self.t_neg_dir+self.t_neg_dir*rne*self.r_pos_dir*exp*factor
return self.r_neg_dir_eff[n]
def get_t_neg_dir_eff(self,n):
"""
        Calculate :math:`t_{eff}` in the negative direction
Parameters
----------
n : int
            Fourier component
"""
if self.t_neg_dir_eff[n]==None:
self.get_r_neg_dir_eff(n)
return self.t_neg_dir_eff[n]
class single_recursive_images:
"""
A class that houses the potential, capacitance and electric fields from the method of recursive images.
The values obtained are unphysical, as the potential at the electrodes is not constant.
Parameters
----------
eta : float
cover fraction of the electrodes
interface_of_electrodes : int
interface for the electrodes
thickness_of_layers : list of floats
thicknesses of the layers, this list will be 2 shorter than eps_x_of_layers and eps_y_of_layers, as the outermost layers have no defined thickness, but are infinite
eps_x_of_layers : list of floats
in-plane dielectric constant of the layers
eps_y_of_layers : list of floats
out-of-plane dielectric constant of the layers
max_fourier_n : int, optional
        maximum number of Fourier components to consider, defaults to 120
accuracy_limit : float, optional
reflections with less than accuracy_limit are ignored, defaults to 10**-15
inherit: single_recursive_images object or None, optional
        If not None, will inherit layers and interfaces from the object indicated.
If None, new layer and interface objects will be created. Defaults to None
"""
    _A_dicts=dict()#adict[k][n]=A_{2n+1}
_V_dicts=dict()#Vdict[k][x]=V_{I,inf}(x,0)
#Common variables
#----------------
#_A_ditcts : dictionary
# :math:`A(\\eta)_{2n+1}` is stored here so it does not need to be recalculated for later iterations
#_V_dicts : dictionary
# :math:`V(\\eta)_{2n+1}` is stored here so it does not need to be recalculated for later iterations
def __init__(self,eta,interface_of_electrodes,thickness_of_layers,eps_x_of_layers,eps_y_of_layers,max_fourier_n=120,accuracy_limit=10**-15, inherit=None, hybrid=True):
"""
Returns
-------
class object
"""
self.hybrid=hybrid
# declare the layers and interfaces, if not inherited
if inherit==None:
self.layers=[]
for e11,e33,T in zip(eps_x_of_layers,eps_y_of_layers,[np.inf]+list(thickness_of_layers)+[np.inf]):
self.layers.append(layer(e11,e33,T))
self.interfaces=[]
for i, _ in enumerate(self.layers[0:-1]):
if i==0:
z=0
else:
z=sum(thickness_of_layers[0:i])
self.interfaces.append(interface(self.layers[i+1],self.layers[i],z,max_fourier_n))
self.layers[i].interface_pos_dir=self.interfaces[-1]
self.layers[i+1].interface_neg_dir=self.interfaces[-1]
else:
self.layers=inherit.layers
self.interfaces=inherit.interfaces
self.eta=np.array(eta)
self.interface_of_electrodes=interface_of_electrodes
self.number_of_layers=len(self.layers)
self.max_fourier_n=max_fourier_n
self.accuracy_limit=accuracy_limit
self.k=np.sin(np.pi/2*self.eta)
self.Kk= scipy.special.ellipk(float(self.k**2))
self.Kpk= scipy.special.ellipk(1-float(self.k**2))
        if not self.k in self._A_dicts:
            self._A_dicts[self.k]=dict()
        self.adict=self._A_dicts[self.k] #dict()#adict[n]=A_{2n+1}
if self.hybrid:
if not self.k in self._V_dicts:
self._V_dicts[self.k]=dict()
self.Vdict=self._V_dicts[self.k] #dict()#adict[n]=A_{2n+1}
self.tree=[]
self.P_field_pos=np.full((max_fourier_n), None)#r_pos_dir)
self.P_field_neg=np.full((max_fourier_n), None)#t_pos_dir)
def get_P_field_pos(self,n):
"""
        Gets the sum of all fields projected from the electrodes in the positive direction for Fourier component n
Parameters
----------
n : int
            Fourier component
Returns
-------
float
multiplication factor of projected fields with respect to the case with no reflections
"""
if self.P_field_pos[n]==None:
if not self.P_field_pos[n-1]==1: # if self.P_field_pos[n-1]==1, we conclude that it will be 1 for all further components
# sum of fields originating from the field initially projected in the positive direction
if len(self.interfaces)>self.interface_of_electrodes+1:
rpo=self.interfaces[self.interface_of_electrodes+1].get_r_pos_dir_eff(n)
rne=self.interfaces[self.interface_of_electrodes].get_r_neg_dir_eff(n)
t=self.layers[self.interface_of_electrodes+1].t_eff
self.P_field_pos[n]=1/(1-rpo*rne*np.exp(-np.pi*2*t*(2*n+1)))
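                    # the 1/(1-x) factor is the closed form of the geometric
                    # series sum over m>=0 of (rpo*rne*exp)^m, i.e. all
                    # round-trip reflections inside the adjacent layer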
else:
self.P_field_pos[n]=1
# sum of fields originating from the field initially projected in the negative direction
if self.interface_of_electrodes>0:
rpo=self.interfaces[self.interface_of_electrodes].get_r_pos_dir_eff(n)
tpo=self.interfaces[self.interface_of_electrodes].get_t_pos_dir_eff(n)
rne=self.interfaces[self.interface_of_electrodes-1].get_r_neg_dir_eff(n)
t=self.layers[self.interface_of_electrodes].t_eff
self.P_field_pos[n]+=rne*tpo*np.exp(-np.pi*2*t*(2*n+1)) / (1-rpo*rne*np.exp(-np.pi*2*t*(2*n+1)))
if abs(self.P_field_pos[n]-1)<self.accuracy_limit: # mark that we are done calculating components, by setting self.P_field_pos[n]=1
self.P_field_pos[n]=1
else:
self.P_field_pos[n]=1
return self.P_field_pos[n]
def get_P_field_neg(self,n):
"""
        Gets the sum of all fields projected from the electrodes in the negative direction for Fourier component n
Parameters
----------
n : int
            Fourier component
Returns
-------
float
multiplication factor of projected fields with respect to the case with no reflections
"""
if self.P_field_neg[n]==None:
if not self.P_field_neg[n-1]==1: # if self.P_field_neg[n-1]==1, we conclude that it will be 1 for all further components
                # sum of fields originating from the field initially projected in the negative direction
if self.interface_of_electrodes>0:
rpo=self.interfaces[self.interface_of_electrodes].get_r_pos_dir_eff(n)
rne=self.interfaces[self.interface_of_electrodes-1].get_r_neg_dir_eff(n)
t=self.layers[self.interface_of_electrodes].t_eff
self.P_field_neg[n]=1/(1-rpo*rne*np.exp(-np.pi*2*t*(2*n+1)))
else:
self.P_field_neg[n]=1
# sum of fields originating from the field initially projected in the positive direction
if len(self.interfaces)>self.interface_of_electrodes+1:
rpo=self.interfaces[self.interface_of_electrodes+1].get_r_pos_dir_eff(n)
rne=self.interfaces[self.interface_of_electrodes].get_r_neg_dir_eff(n)
tne=self.interfaces[self.interface_of_electrodes].get_t_neg_dir_eff(n)
t=self.layers[self.interface_of_electrodes+1].t_eff
self.P_field_neg[n]+=rpo*tne*np.exp(-np.pi*2*t*(2*n+1)) / (1-rpo*rne*np.exp(-np.pi*2*t*(2*n+1)))
                if abs(self.P_field_neg[n]-1)<self.accuracy_limit: # mark that we are done calculating components, by setting self.P_field_neg[n]=1
self.P_field_neg[n]=1
else:
self.P_field_neg[n]=1
return self.P_field_neg[n]
def get_C(self):
"""
Returns
-------
float
            capacitance
"""
eps_above=self.layers[self.interface_of_electrodes].eps_y*self.layers[self.interface_of_electrodes].eps_r
eps_below=self.layers[self.interface_of_electrodes+1].eps_y*self.layers[self.interface_of_electrodes+1].eps_r
return (eps_above+eps_below)*self.Kk/self.Kpk*eps0/2
def get_C_int_Ex(self):
"""
Function for calculating the capacitance by integrating :math:`\\varepsilon_xE_x` at :math:`x=0`.\n
Used for testing, as it should give same output as get_C().\n
For all practical applications get_C() should be used instead\n
Returns
-------
float
            capacitance
"""
if self.layers[0].eps_x>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), -np.inf, 0)
C= G*self.layers[0].eps_x*eps0
else:
C=0
for i in range(self.number_of_layers-2):
if self.layers[i+1].eps_x>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.interfaces[i].y, self.interfaces[i+1].y)
C+= G*self.layers[i+1].eps_x*eps0
if self.layers[-1].eps_x>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.interfaces[-1].y, np.inf)
C+= G*self.layers[-1].eps_x*eps0
return C
def getA(self,n):
"""
calculates :math:`A_{2n+1}`
Parameters
----------
        n : int
            Fourier component
Returns
-------
float
A_(2n+1)
"""
if not n in self.adict:
Pn=scipy.special.legendre(n)(2*self.k**2-1)
self.adict[n]=np.pi/self.Kpk*Pn #=A_{2n+1}
return self.adict[n]
def get_V_Ex_Ey(self,x,y,get_V=1,get_Ex=1,get_Ey=1): # accepts 'x' as a list, but 'y' must be single value
"""
Function for calculating the the potential and electric fields at coordinates (x,y)
The potential and fields are calculated as the sum of the field going away from the electrodes,
and the field going towards the electrodes.
        The amplitude of the field going away from the electrodes is:
        sum_{n=0->inf} (all t_{eff,n} from electrodes to layer)*exp(-pi*exp_arg_fac*(2n+1))
        where exp(-pi*exp_arg_fac*(2n+1)) is calculated as cur_exp=exp(-pi*exp_arg_fac)*exp(-2*pi*exp_arg_fac)^n
        Similarly, the amplitude of the field reflected back towards the electrodes is:
        sum_{n=0->inf} (all t_{eff,n} from electrodes to layer)*(r_{eff,n} at the next interface)*exp(-pi*reverse_exp_arg_fac*(2n+1))
        where exp(-pi*reverse_exp_arg_fac*(2n+1)) is calculated as cur_exp=exp(-pi*reverse_exp_arg_fac)*exp(-2*pi*reverse_exp_arg_fac)^n
Parameters
----------
x : float or list of floats
            x-coordinate(s)
y : float
            y-coordinate
get_V : bool, optional
V is only calculated if this flag is set to True, default: True
get_Ex : bool, optional
Ex is only calculated if this flag is set to True, default: True
get_Ey : bool, optional
Ey is only calculated if this flag is set to True, default: True
Returns
-------
list of floats for V, Ex, Ey
"""
x=np.array(x)
x=np.atleast_1d(x)
if self.hybrid:
if y==self.interfaces[self.interface_of_electrodes].y and get_V==1 and get_Ex==0 and get_Ey==0:
V=self.get_V_electrodes(x)
return V, 0,0
V=np.zeros(x.size)
Ex=np.zeros(x.size)
Ey=np.zeros(x.size)
# get current layer we are in
layer=0
while layer<len(self.interfaces) and self.interfaces[layer].y <= y:
layer+=1
distPrior=0
reflected_field=0
if self.interfaces[self.interface_of_electrodes].y <= y: # we are above electrodes
direction=1
getPfield=self.get_P_field_pos
for i in range(self.interface_of_electrodes+1,layer):
distPrior+=self.layers[i].t_eff
# calculate exp_arg_fac, the argument for the exponential, i.e. exp((2n+1)*exp_arg_fac)
exp_arg_fac=-np.pi*( (y-self.interfaces[layer-1].y)*self.layers[layer].eps_r+distPrior )
if len(self.interfaces) > layer:
reverse_exp_arg_fac=-np.pi*( (self.interfaces[layer].y-y)*self.layers[layer].eps_r+self.layers[layer].t_eff+distPrior )
reflected_field=1
else: # we are below electrodes
direction=-1
getPfield=self.get_P_field_neg
for i in range(self.interface_of_electrodes,layer,direction):
distPrior+=self.layers[i].t_eff
exp_arg_fac=-np.pi*( direction*(y-self.interfaces[layer].y)*self.layers[layer].eps_r+distPrior )
if layer>0:
reverse_exp_arg_fac=-np.pi*( direction*(self.interfaces[layer-1].y-y)*self.layers[layer].eps_r+self.layers[layer].t_eff+distPrior )
reflected_field=1
exp_mul_fac=np.exp(exp_arg_fac*2) # <- multiplication factor for the exponential attenuation
cur_exp=np.exp(exp_arg_fac) # <- current exponential attenuation
if reflected_field==1: #if there is a reflected field, do also for this
        reverse_exp_mul_fac=np.exp(reverse_exp_arg_fac*2) # <- multiplication factor for the reflected field
# -*- coding: UTF-8 -*-
import math
import pymatgen as mg
from ase.utils import gcd, basestring
from ase.build import bulk
from copy import deepcopy
from numpy.linalg import norm, solve
from pymatgen.analysis.graphs import MoleculeGraph, StructureGraph
from pymatgen.core.structure import Molecule
from pymatgen.io.vasp.inputs import Poscar
from ase import io
import networkx.algorithms.isomorphism as iso
import numpy as np
import networkx as nx
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
# used for deciding which atoms are bonded
from pymatgen.analysis.local_env import JmolNN
import os
import sys
import time
from functools import wraps
from collections import Counter
def from_ASE_to_pymatgen(working_dir, images):
"""
change ASE structure to pymatgen structure
"""
file_name = working_dir + "/temp.POSCAR.vasp"
io.write(file_name, images)
modify_poscar(file_name)
slab = mg.Structure.from_file(file_name)
os.remove(file_name)
return slab
def modify_poscar(file):
"""
    Change the file into a compliant POSCAR.
"""
index = 0
prev_file = open(file, 'r')
new_file = open(file+'.new', 'w')
for line in prev_file:
if index == 0:
tmp = line
new_file.write('slab\n')
elif index == 5:
#new_file.write(tmp)
new_file.write(line)
else:
new_file.write(line)
index = index+1
prev_file.close()
new_file.close()
os.remove(file)
os.rename(file+'.new', file)
def surface(lattice, indices, layers, tol=1e-10, termination=0):
"""Create surface from a given lattice and Miller indices.
lattice: Atoms object or str
Bulk lattice structure of alloy or pure metal. Note that the
unit-cell must be the conventional cell - not the primitive cell.
One can also give the chemical symbol as a string, in which case the
correct bulk lattice will be generated automatically.
indices: sequence of three int
Surface normal in Miller indices (h,k,l).
layers: int
Number of equivalent layers of the slab.
termination: int
The termination "number" for your crystal. The same value will not
produce the same termination for different symetrically identical
bulk structures, but changing this value allows your to explore all
the possible terminations for the bulk structure you provide it.
note: this code is not well tested
"""
indices = np.asarray(indices)
if indices.shape != (3,) or not indices.any() or indices.dtype != int:
raise ValueError('%s is an invalid surface type' % indices)
if isinstance(lattice, basestring):
lattice = bulk(lattice, cubic=True)
h, k, l = indices
h0, k0, l0 = (indices == 0)
if termination != 0: # changing termination
import warnings
warnings.warn('Work on changing terminations is currently in '
'progress. Code may not behave as expected.')
lattice1 = deepcopy(lattice)
cell = lattice1.get_cell()
pt = [0, 0, 0]
millers = list(indices)
for index, item in enumerate(millers):
if item == 0:
millers[index] = 10 ** 9 # make zeros large numbers
elif pt == [0, 0, 0]: # for numerical stability
pt = list(cell[index] / float(item) /
np.linalg.norm(cell[index]))
h1, k1, l1 = millers
N = np.array(cell[0] / h1 + cell[1] / k1 + cell[2] / l1)
n = N / np.linalg.norm(N) # making a unit vector normal to cut plane
d = [np.round(np.dot(n, (a - pt)), 4)
for a in lattice.get_scaled_positions()]
d = set(d)
d = sorted(list(d))
d = [0] + d # distances of atoms from cut plane
displacement = (h * cell[0] + k * cell[1] +
l * cell[2]) * d[termination]
lattice1.positions += displacement
lattice = lattice1
if h0 and k0 or h0 and l0 or k0 and l0: # if two indices are zero
if not h0:
c1, c2, c3 = [(0, 1, 0), (0, 0, 1), (1, 0, 0)]
if not k0:
c1, c2, c3 = [(0, 0, 1), (1, 0, 0), (0, 1, 0)]
if not l0:
c1, c2, c3 = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
else:
p, q = ext_gcd(k, l)
a1, a2, a3 = lattice.cell
# constants describing the dot product of basis c1 and c2:
# dot(c1,c2) = k1+i*k2, i in Z
k1 = np.dot(p * (k * a1 - h * a2) + q * (l * a1 - h * a3),
l * a2 - k * a3)
k2 = np.dot(l * (k * a1 - h * a2) - k * (l * a1 - h * a3),
l * a2 - k * a3)
if abs(k2) > tol:
i = -int(round(k1 / k2)) # i corresponding to the optimal basis
p, q = p + i * l, q - i * k
a, b = ext_gcd(p * k + q * l, h)
c1 = (p * k + q * l, -p * h, -q * h)
c2 = np.array((0, l, -k)) // abs(gcd(l, k))
c3 = (b, a * p, a * q)
surf = build(lattice, np.array([c1, c2, c3]), layers, tol)
return surf
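# Example usage (a sketch; the element and indices are arbitrary choices):
#   slab = surface('Au', (2, 1, 1), layers=3)
# builds a 3-layer Au(211) slab from an automatically generated cubic cell.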
def ext_gcd(a, b):
"""
Extended Euclidean Algorithm. Find the result for ax + by = gcd(a, b).
Parameters
----------
a: int
b: int
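    Example
    -------
    ext_gcd(15, 10) returns (1, -1), since 15*1 + 10*(-1) = 5 = gcd(15, 10).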
"""
if b == 0:
return 1, 0
elif a % b == 0:
return 0, 1
else:
x, y = ext_gcd(b, a % b)
return y, x - y * (a // b)
def build(lattice, basis, layers, tol):
"""
Transform the structure to original surface based on basis.
Parameters
----------
basis: 3 * 3 matrix, [[a, b, c], ...]
the basis vectors of the target surfaces.
lattice: Atoms object or str
Bulk lattice structure of alloy or pure metal. Note that the
unit-cell must be the conventional cell - not the primitive cell.
One can also give the chemical symbol as a string, in which case the
correct bulk lattice will be generated automatically.
layers: int
Number of equivalent layers of the slab.
"""
surf = lattice.copy()
scaled = solve(basis.T, surf.get_scaled_positions().T).T
scaled -= np.floor(scaled + tol)
surf.set_scaled_positions(scaled)
surf.set_cell(np.dot(basis, surf.cell), scale_atoms=True)
surf *= (1, 1, layers)
return surf
def modify_cell(structure):
"""
This is the final step of a molecular reconstruction step, and would
align z direction to be perpendicular to the surface
Parameters
---------
structure: Atoms object or str
In this structure, the z direction might not be perpendicular to the
target surface.
"""
slab = structure.copy()
a1, a2, a3 = slab.cell
slab.set_cell([a1, a2,
np.cross(a1, a2) * np.dot(a3, np.cross(a1, a2)) /
norm(np.cross(a1, a2)) ** 2])
# Change unit cell to have the x-axis parallel with a surface vector
# and z perpendicular to the surface:
a1, a2, a3 = slab.cell
slab.set_cell([(norm(a1), 0, 0),
(np.dot(a1, a2) / norm(a1),
np.sqrt(norm(a2) ** 2 - (np.dot(a1, a2) / norm(a1)) ** 2), 0),
(0, 0, norm(a3))],
scale_atoms=True)
slab.pbc = (True, True, False)
scaled = slab.get_scaled_positions()
scaled[:, :2] %= 1
slab.set_scaled_positions(scaled)
return slab
def handle_with_molecules(slab_move, delta, down=True):
"""
    Move very small fragments of broken molecules to the other side. This is
    a preparation step for the move method that reduces the number of edge
    cases it has to handle.
Parameters
----------
slab_move: Atoms structure
slab_move is the original surfaces that is generated by ase library.
delta: list of double, [delta_x, delta_y, delta_z]
Add or subtract the delta (cart_coords) to the tiny broken molecules to
initially repair parts of molecules.
down: bool
True: Add a delta to the tiny broken molecules that are located at the bottom,
False: subtract a delta to the tiny broken molecules that are located at the top.
"""
slab_sg = StructureGraph.with_local_env_strategy(slab_move, JmolNN())
slab_supercell_sg = slab_sg * (3, 3, 1)
slab_sg_graph = nx.Graph(slab_supercell_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(slab_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(slab_supercell_sg.structure[n].specie))
molecules = []
for subgraph in super_subgraphs:
coords = [slab_supercell_sg.structure[n].coords
for n in subgraph.nodes()]
# get the frac_cood of every atom for every molecules
coord_z_list = [slab_move.lattice.get_fractional_coords(
coord)[-1] for coord in coords]
if down is True:
temp = [coord_z < 0.5 for coord_z in coord_z_list]
else:
temp = [coord_z > 0.5 for coord_z in coord_z_list]
if not all(temp) or len(coords) > 6:
continue
species = [slab_supercell_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
# molecules are the list of molecules that need to be moved
move_list = []
move_sites = reduced_sites(molecules, slab_move)
for move_site in move_sites:
for i, atom in enumerate(slab_move):
if atom.is_periodic_image(move_site):
move_list.append(i)
break
coords_move = slab_move.cart_coords
species_move = slab_move.species
slab_move.remove_sites(move_list)
for i in move_list:
if down is True:
new_coord = np.array(coords_move[i]) + np.array(delta)
else:
new_coord = np.array(coords_move[i]) - np.array(delta)
slab_move.append(species_move[i], new_coord, coords_are_cartesian=True)
return slab_move
def Find_Broken_Molecules(slab, sg, species_intact, coords_intact, unique_bulk_subgraphs):
"""
Use molecular identification method to find those molecules in the surface
that are different from that in the bulk.
Parameters
----------
slab: Atoms structure
The surface that is generated by ase library and might have broken molecules.
sg: list of Molecules
Unique Molecules in bulk Structure.
species_intact: list, ['specie_1', 'specie_2', ...]
A list of atomic species of intact molecules.
coords_intact: list, [[coord_1_1, coord_1_2, coord_1_3], ...]
A list of atomic cart_coords of intact molecules.
unique_bulk_subgraphs: list of graphs
        A list of intact molecules' graphs. Note that every graph in this list
is unique
"""
slab_sg = StructureGraph.with_local_env_strategy(slab, JmolNN())
# enlarge the cell to a (3 * 3 * 1) super_cell
slab_supercell_sg = slab_sg * (3, 3, 1)
different_subgraphs_in_slab, slab_molecules = \
get_slab_different_subgraphs(slab_supercell_sg, unique_bulk_subgraphs)
slab_molecules = double_screen(slab_molecules, sg)
# the molecules in slab_original would be the template
#print("The number of molecules that need to be fixed : " +
# str(len(slab_molecules)))
# slab_molecules are the molecules that are broken and need to be fixed
delete_sites = reduced_sites(slab_molecules, slab)
# delete_list is the list of broken atoms
delete_list = []
for delete_site in delete_sites:
for i, atom in enumerate(slab):
if atom.is_periodic_image(delete_site):
delete_list.append(i)
break
species_all = slab.species
coords_all = slab.cart_coords
for i, atom in enumerate(slab):
temp = [i == delete for delete in delete_list]
if not any(temp):
species_intact.append(species_all[i])
coords_intact.append(coords_all[i])
delete_list = []
# remove intact molecules in the slab for convenience
#print("Delete all atoms!")
for i, atom in enumerate(slab):
delete_list.append(i)
slab.remove_sites(delete_list)
sites = []
for slab_molecule in slab_molecules:
for curr_site in slab_molecule:
curr_site = mg.PeriodicSite(curr_site.specie,
curr_site.coords,
slab.lattice,
coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
for site in sites:
# add the broken molecules into the system
slab.append(species=site.specie, coords=site.coords,
coords_are_cartesian=True)
return slab
def get_broken_molecules(self, bulk_subgraphs, use_weights=False):
    # compare each molecule in the slab to each molecule in the bulk and
    # discard isomorphic ones; the remaining molecules are the broken ones
"""
Retrieve broken_subgraphs as molecules
Will return nonunique molecules, duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Returns:
-------
: list of nonunique broken Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
    # discount subgraphs that lie across *supercell* boundaries;
    # these would be subgraphs representing the infinite crystal
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# remove complete molecules in subgraphs
different_subgraphs = []
start = time.time()
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=nm)
for g in bulk_subgraphs]
if not any(already_present):
different_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in different_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
#molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
# now define how we test for isomorphism
def node_match(n1, n2):
"""the strategy for node matching in is_isomorphic.
Parameters
------
n1, n2 : node
Returns:
-------
True of false : bool
based on whether the species of two nodes are the same.
"""
return n1['specie'] == n2['specie']
def get_bulk_molecules(self, use_weights=False):
    # get rid of the repetitive molecules in bulk; keep only the unique ones
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Parameters:
------
use_weights: (bool) If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
Returns:
-------
list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
# enlarge the structureGraph object to a supercell
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs
    # create a networkx undirected graph object to store the input graph
    supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(
supercell_sg.graph))
# add specie names to graph to be able to test for isomorphism
for subgraph in all_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in all_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
molecules.append(molecule)
return molecules, unique_subgraphs
#################convert to undirected mx.graph and then determine if isomorphic###############
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
Parameters:
----------
other: MoleculeGraph object to be compared.
Returns:
-------
bool
"""
if self.molecule.composition != other.molecule.composition:
return False
else:
self_undir = self.graph.to_undirected()
other_undir = other.graph.to_undirected()
nm = iso.categorical_node_match("specie", "ERROR")
isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)
return isomorphic
def reduced_sites(molecules, slab):
"""
Find atoms that appear again due to the periodicity.
Parameters:
-----------
molecules: List[molecule].
All molecules that might be within or out of the slab boundary.
slab: ASE structure.
Slab structure.
Returns:
--------
sites: List[atom].
"""
sites = []
for molecule in molecules:
for curr_site in molecule:
curr_site = PeriodicSite(
curr_site.specie, curr_site.coords, slab.lattice, coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
return sites
def is_isomorphic(molecule1, molecule2):
"""
    Determine whether two molecules are the same.
Parameters:
-----------
molecule1 and molecule2.
Returns:
--------
bool.
"""
return isomorphic_to(MoleculeGraph.with_local_env_strategy(molecule1, JmolNN()), MoleculeGraph.with_local_env_strategy(molecule2, JmolNN()))
def double_screen(slab_molecules, bulk_molecules):
"""
Double check with bulk if there is any molecule already present in bulk
"""
delete_list = []
for bulk_molecule in bulk_molecules:
for i, slab_molecule in enumerate(slab_molecules):
if is_isomorphic(bulk_molecule, slab_molecule):
delete_list.append(i)
tmp = [x for i, x in enumerate(slab_molecules) if i not in delete_list]
return tmp
def print_run_time(func):
"""
    A wrapper that outputs the run time of a function.
"""
@wraps(func)
    def wrapper(*args, **kw):
        local_time = time.time()
        # keep the wrapped function's return value instead of discarding it
        result = func(*args, **kw)
        print('Current Function [%s] run time is %.2fs' %
              (func.__name__, time.time() - local_time))
        return result
return wrapper
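# Example usage (hypothetical function name):
#   @print_run_time
#   def cleave_surface(...):
#       ...
# prints e.g. "Current Function [cleave_surface] run time is 1.23s" per call.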
def updatePOSCAR(output_file):
"""This function is used to correct the output file (POSCAR) of ase.
Parameters:
----------
output_file : str
The file of surface written by the write function of ase.
Returns:
-------
file : str
The file that is corrected.
"""
with open(output_file, 'r') as original_file:
lines = original_file.readlines()
line1 = lines[0]
lines.insert(5, " " + line1)
with open(output_file, 'w') as final_file_1:
for i in range(len(lines)):
final_file_1.writelines(lines[i])
structure = mg.Structure.from_file(output_file)
lattice = Lattice(structure.lattice.matrix)
frac_coords = lattice.get_fractional_coords(structure.cart_coords)
for i in range(frac_coords.shape[0]):
for j in range(frac_coords.shape[1]):
if abs(frac_coords[i][j] - 1) < 1e-5:
frac_coords[i][j] = 1
if abs(frac_coords[i][j] - 0) < 1e-5:
frac_coords[i][j] = 0
with open(output_file, 'r') as final_file_2:
lines = final_file_2.readlines()
lines[7] = 'Direct' + '\n'
for i in range(np.array(frac_coords).shape[0]):
lines[8 + i] = " " + str(np.array(frac_coords)[i, :][0]) + ' ' + str(np.array(frac_coords)[i, :][1]) +\
' ' + str(np.array(frac_coords)[i, :][2]) + '\n'
with open(output_file, 'w') as final_file:
for i in range(len(lines)):
final_file.writelines(lines[i])
def edge_match(e1, e2):
"""the strategy for edge matching in is_isomorphic.
Parameters:
----------
    e1, e2 : edge.
Returns:
-------
True or false : bool
        based on whether the bond lengths are the same or close to each other.
"""
return abs(e1['weight'] - e2['weight']) / e2['weight'] < 1e-5
def get_bulk_subgraphs(bulk_structure_sg):
"""
Get all subgraphs of molecules that within or crosses the boundary of
original bulk.
Parameters:
-----------
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
super_graphs : List[graph].
Represent the subgraphs of molecules that within or crosses the
boundary of original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
in_boundary = any([d['to_jimage'] == (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if in_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
    # drop lone hydrogen "molecules"; filtering into a new list avoids
    # mutating super_subgraphs while iterating over it
    super_subgraphs = [subgraph for subgraph in super_subgraphs
                       if not (len(subgraph) == 1 and
                               "H" in [str(bulk_structure_sg.structure[n].specie)
                                       for n in subgraph.nodes()])]
molecules = []
for subgraph in super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return super_subgraphs, molecules
def get_bulk_subgraphs_v2(bulk_structure_sg):
"""
Get all subgraphs of molecules that within or crosses the boundary of
original bulk.
Parameters:
-----------
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
super_graphs : List[graph].
Represent the subgraphs of molecules that within or crosses the
boundary of original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
for subgraph in all_super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
molecules = []
for subgraph in all_super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return all_super_subgraphs, molecules
def get_bulk_subgraphs_v3(slab_first, bulk_structure_sg):
"""
Get all subgraphs of molecules that within or crosses the boundary of
original bulk and generate HashGraph to modify atoms' positions.
Parameters:
-----------
slab_first: pymatgen structure.
Original slab structure that cleaved by ASE
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
delta_cart: List[float].
c_difference between two adajacent layers.
super_graphs : List[graph].
Represent the subgraphs of molecules that within or crosses the
boundary of original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
for subgraph in all_super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
frac_coods = [0] * len(bulk_structure_sg.structure)
initial_index = -100
molecules = []
for subgraph in all_super_subgraphs:
HashGraph = {}
for u, v, d in subgraph.edges(data=True):
change_z = list(d['to_jimage'])[-1]
if change_z != 0:
change_z = 1 if slab_first.lattice.get_fractional_coords(bulk_structure_sg.structure[u].coords)[-1] > slab_first.lattice.get_fractional_coords(bulk_structure_sg.structure[v].coords)[-1] else -1
try:
HashGraph[str(u)].append([str(v), change_z])
except KeyError:
HashGraph[str(u)] = [initial_index, [str(v), change_z]]
try:
HashGraph[str(v)].append([str(u), -change_z])
except KeyError:
HashGraph[str(v)] = [initial_index, [str(u), -change_z]]
first_node = list(HashGraph.keys())[0]
count = 1
HashGraph[first_node][0] = 0
Pending_node = [first_node]
Pending_node_2 = []
while(count < len(list(HashGraph.keys()))):
for node in Pending_node:
for value in HashGraph[node][1: ]:
if HashGraph[value[0]][0] == initial_index:
count += 1
HashGraph[value[0]][0] = HashGraph[node][0] + value[1]
Pending_node_2.append(value[0])
Pending_node = deepcopy(Pending_node_2)
Pending_node_2 = []
# min_z = min([value[0] for value in HashGraph.values()])
min_z = int(Counter([value[0] for value in HashGraph.values()]).most_common(1)[0][0])
delta = np.array(slab_first.lattice.matrix[-1])
for key in HashGraph.keys():
HashGraph[key][0] -= min_z
coords = [bulk_structure_sg.structure[n].coords + delta * HashGraph[str(n)][0]
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return delta, all_super_subgraphs, molecules
def get_bulk_subgraphs_unique(bulk_structure_sg):
"""
get unique subgraphs of bulk based on graph algorithm.
This function would only return unique molecules and its graphs,
but not any duplicates present in the crystal.
(A duplicate defined as an isomorphic crystals.
Parameters:
-----------
bulk_structure_sg : nx.SturctureGraph class,
this one is actually the supercell one that is equal to(3, 3, 3) * unit cell.
Returns:
--------
unique_super_graphs : (list) [graph, ...],
represent the unique subgraphs in the supercell and expecially
in the boundary of supercell.
molecules : (list) [molecule, ...],
represent the molecules that are correlated to the unque subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
unique_super_subgraphs = []
for subgraph in super_subgraphs:
if len(subgraph) == 1 and "H" in [str(bulk_structure_sg.structure[n].specie) for n in subgraph.nodes()]:
continue
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_super_subgraphs]
if not any(already_present):
unique_super_subgraphs.append(subgraph)
molecules = []
for subgraph in unique_super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return unique_super_subgraphs, molecules
def get_slab_different_subgraphs(slab_supercell_sg, unique_super_bulk_subgraphs):
"""this function is used to find all the subgraphs in slab that
are different from those in bulk.
Parameters:
----------
slab_supercell_sg : nx.StructureGraph,
the graph of the whole slabs.
Note: In order to thoughtoutly describe the graph,
the slab_supercell_sg = (3, 3, 1) * slab_sg
unique_super_bulk_subgraphs : list.
Returns:
-------
different_subgraphs : list
[different_subgraph, ...], which is the list of subgraphs that
are different from those in bulk. In this function,
we would only find the different subgraphs based on its species.
slab_molecules : list
[slab_molecule, ...], slab_molecule is the mg.Molecule of diffenert_subgraphs.
"""
slab_supercell_sg_graph = nx.Graph(slab_supercell_sg.graph)
all_subgraphs = list(nx.connected_component_subgraphs
(slab_supercell_sg_graph))
molecule_subgraphs = []
for subgraph in all_subgraphs:
        intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
                                   for u, v, d in subgraph.edges(data=True)])
        if not intersects_boundary:
molecule_subgraphs.append(subgraph)
#print("molecule_subgraphs : ", len(molecule_subgraphs))
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(
slab_supercell_sg.structure[n].specie))
nm = iso.categorical_node_match("specie", "ERROR")
different_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=nm)
for g in unique_super_bulk_subgraphs]
if not any(already_present):
different_subgraphs.append(subgraph)
slab_molecules = []
for subgraph in different_subgraphs:
coords = [slab_supercell_sg.structure[n].coords
for n in subgraph.nodes()]
species = [slab_supercell_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
slab_molecules.append(molecule)
return different_subgraphs, slab_molecules
def belong_to(species1, species2):
"""
Determine whether species1 are totally included by species2.
"""
if len(species1) > len(species2):
return False
i = 0
species_1 = species1[:]
species_2 = species2[:]
while i < len(species_1):
find = False
for j in range(len(species_2)):
if species_1[i] == species_2[j]:
del species_1[i]
find = True
del species_2[j]
break
if find is False:
return False
return True
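# Example: belong_to(['C', 'H'], ['C', 'H', 'H']) is True, while
# belong_to(['C', 'O'], ['C', 'H', 'H']) is False.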
def length_belong_to(weights1, weights2):
"""
Determine whether weights1 are totally included by weights2
weights are the list [weight, weight, ...] of one node
"""
if len(weights1) > len(weights2):
return False
i = 0
weights_1 = weights1[:]
weights_2 = weights2[:]
while i < len(weights_1):
find = False
for j in range(len(weights_2)):
if abs((weights_1[i] - weights_2[j]) / weights_2[j]) < 1e-5:
del weights_1[i]
find = True
del weights_2[j]
break
if find is False:
return False
return True
def weights_all_belong_to(all_weight1, all_weight2, species1, species2):
"""
Determine whether one graph is totally included by another graph by
comparing species and weights.
"""
if len(all_weight1) > len(all_weight2):
return False
i = 0
account = 0
total = len(all_weight1)
all_weight_1 = all_weight1[:]
all_weight_2 = all_weight2[:]
species_1 = species1[:]
species_2 = species2[:]
while i < len(all_weight_1):
find = False
for j in range(len(all_weight_2)):
if length_belong_to(all_weight_1[i], all_weight_2[j]) and species_1[i] == species_2[j]:
del all_weight_1[i]
del species_1[i]
del species_2[j]
account += 1
del all_weight_2[j]
find = True
break
if not find:
i += 1
if account >= 2.0 / 3.0 * total:
return True
return False
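# The 2/3 threshold above tolerates some missing bonds, so a broken fragment
# can still be matched against the intact molecule it came from.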
def brokenMolecules_and_corresspoundingIntactMolecules(new_different_subgraphs,
unique_super_subgraphs):
"""
NOT used in current reconstruction method!!!
    Determine the intact molecule that each molecule (broken or intact) belongs to by
    comparing the species and weights of broken molecules and intact
molecules.
Parameters:
-----------
new_different_subgraphs: List[subgraph].
Subgraphs of all molecules.
unique_super_subgraphs: List[subgraph].
Subgraphs of all bulk's unique molecules.
Returns:
--------
qualified_subgraphs: List[subgraph].
List of subgraph of molecules in the raw slab.
unique_super_subgraphs: List[subgraph].
        List of subgraphs of the corresponding intact molecules. The length of
qualified_unique_subgraphs should be the same as the length of
qualified_subgraphs.
"""
qualified_subgraphs = []
qualified_unique_subgraphs = []
# account = 1
#print("trying to find the connection between broken molecules "
# "and intact molecules")
for subgraph in new_different_subgraphs:
subgraph_species = []
weights_all = []
for n, nbrs in subgraph.adjacency():
subgraph_species.append(subgraph.node[n]['specie'])
weights = []
for nbr, eattr in nbrs.items():
weights.append(eattr['weight'])
weights_all.append(weights)
find = False
for unique_subgraph in unique_super_subgraphs:
unique_subgraph_species = []
unique_weights_all = []
for n, nbrs in unique_subgraph.adjacency():
unique_subgraph_species.append(
unique_subgraph.node[n]['specie'])
weights = []
for nbr, eattr in nbrs.items():
weights.append(eattr['weight'])
unique_weights_all.append(weights)
if not belong_to(subgraph_species, unique_subgraph_species):
continue
else:
if not weights_all_belong_to(weights_all, unique_weights_all,
subgraph_species,
unique_subgraph_species):
continue
else:
find = True
qualified_subgraphs.append(subgraph)
qualified_unique_subgraphs.append(unique_subgraph)
break
if find is False:
print("can't find the qualified subgraphs")
sys.exit()
return qualified_subgraphs, qualified_unique_subgraphs
def fix_broken_molecules(qualified_subgraphs,
qualified_unique_subgraphs,
bulk_super_structure_sg,
slab_supercell_sg,
slab, c_frac_min, fixed_c_negative=False):
"""
NOT used in the current reconstruction method!!!
Fix broken molecules based on graph theory. After determine the affiliation
between all molecules in raw slabs and intact molecules in the original
bulk, this function would replace those broken molecules with intact
molecules.
Parameters:
-----------
qualified_subgraphs: List[subgraph].
List of subgraphs of all molecules in the raw molecules.
qualified_unique_subgraphs: List[subgraph].
        Each element in the list is the subgraph of the corresponding intact
molecule of "qualified_subgraphs" in the previous list.
bulk_super_structure_sg: StructureGraph.
Structure Graph of supercell (3 x 3 x 3) of original bulk.
slab_supercell_sg: StructureGraph.
Structure Graph of supercell (3 x 3 x 3) of raw slab.
slab: ASE structure.
Raw slab after ASE cleaving.
c_frac_min: float.
Fractional coordinate of the lowest atom in raw slabs in c direction.
fixed_c_negative: bool
Fix the broken molecules in the lower side or not? Default option is
False.
Returns:
--------
slab: pymatgen structure.
Slab after reconstruction.
"""
molecules_new = []
#print("trying to fix the broken molecules...")
for i in range(len(qualified_subgraphs)):
qualified_subgraphs_species = []
qualified_subgraphs_nodes_neibs = []
qualified_subgraphs_all_weights = []
nodes_qualified_subgraphs = []
for n, nbrs in qualified_subgraphs[i].adjacency():
nodes_qualified_subgraphs.append(n)
neibs = []
weights = []
qualified_subgraphs_species.append(
qualified_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
qualified_subgraphs_nodes_neibs.append(neibs)
qualified_subgraphs_all_weights.append(weights)
qualified_unique_subgraphs_species = []
qualified_unique_subgraphs_nodes_neibs = []
qualified_unique_subgraphs_all_weights = []
nodes_qualified_unique_subgraphs = []
for n, nbrs in qualified_unique_subgraphs[i].adjacency():
nodes_qualified_unique_subgraphs.append(n)
neibs = []
weights = []
qualified_unique_subgraphs_species.append(
qualified_unique_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
qualified_unique_subgraphs_all_weights.append(weights)
qualified_unique_subgraphs_nodes_neibs.append(neibs)
node1 = []
node2 = []
account = 0
for t in range(len(qualified_subgraphs_species)):
account = 0
for k in range(len(qualified_unique_subgraphs_species)):
account = 0
if qualified_subgraphs_species[t] == qualified_unique_subgraphs_species[k] \
and length_belong_to(qualified_subgraphs_all_weights[t],
qualified_unique_subgraphs_all_weights[k]) \
and len(qualified_subgraphs_all_weights[t]) == 3:
node1 = [nodes_qualified_subgraphs[t]]
node2 = [nodes_qualified_unique_subgraphs[k]]
account = 0
for a_index, a_weight in enumerate(qualified_subgraphs_all_weights[t]):
for index, weight in enumerate(qualified_unique_subgraphs_all_weights[k]):
has1 = qualified_subgraphs_nodes_neibs[t][a_index] in node1
has2 = qualified_unique_subgraphs_nodes_neibs[k][index] in node2
if abs(weight - a_weight) / weight < 1e-5 and has1 is False and has2 is False:
node1.append(
qualified_subgraphs_nodes_neibs[t][a_index])
node2.append(
qualified_unique_subgraphs_nodes_neibs[k][index])
account += 1
break
if account >= 3:
break
if account >= 3:
break
if account < 3:
print("can't find the corresspounding point")
sys.exit()
coords1 = [slab_supercell_sg.structure[n].coords for n in node1]
coords2 = [bulk_super_structure_sg.structure[n].coords for n in node2]
relative1 = np.array([np.array(coords1[n]) - np.array(coords1[0])
for n in list(range(1, 4))])
relative2 = np.array([np.array(coords2[n]) - np.array(coords2[0])
for n in list(range(1, 4))])
try:
rotationMatrix = np.dot(relative1.T, np.linalg.inv(relative2.T))
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
for m in range(relative1.shape[0]):
if relative1[m, 0] == 0 and relative1[m, 1] == 0 and relative1[m, 2] == 0:
relative1[m, 0] = 1e-9
relative1[m, 2] = -1e-9
for m in range(relative1.shape[1]):
if relative1[0, m] == 0 and relative1[1, m] == 0 and relative1[2, m] == 0:
relative1[0, m] = 1e-9
relative1[2, m] = -1e-9
for m in range(relative2.shape[0]):
if relative2[m, 0] == 0 and relative2[m, 1] == 0 and relative2[m, 2] == 0:
relative2[m, 0] = 1e-9
relative2[m, 2] = -1e-9
for m in range(relative2.shape[1]):
if relative2[0, m] == 0 and relative2[1, m] == 0 and relative2[2, m] == 0:
relative2[0, m] = 1e-9
relative2[2, m] = -1e-9
rotationMatrix = np.dot(
relative1.T, np.linalg.inv(relative2.T))
else:
                print('failed to compute the rotation matrix')
sys.exit()
relative = np.array([np.array(bulk_super_structure_sg.structure[n].coords)
                             - np.array(coords2[0])
                             for n in nodes_qualified_unique_subgraphs])
from gehm.utils.np_distances import *
from tests.test_data import create_test_data
import pytest
import numpy as np
import torch
from numpy import cos, sin
@pytest.mark.distances
def test_nx_second_order_proximity(create_test_data):
G,G_undir=create_test_data
nodes=np.array(G.nodes)
# Test 1: Ordering when subset proximity
sel1=nodes[[0,1,2,3]]
sel2=nodes[[0,3,2,1]]
prox1=nx_second_order_proximity(G,sel1,False)
prox2=nx_second_order_proximity(G,sel2,False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[0,1]==prox2[0,3]), "Ordering problem, {} != {}".format(prox1[0,1],prox2[0,3])
    assert (prox1[1,2]==prox2[3,2]), "Ordering problem, {} != {}".format(prox1[1,2],prox2[3,2])
# Test 2: Ordering when whole network proximity
prox1=nx_second_order_proximity(G,sel1,True)
prox2=nx_second_order_proximity(G,sel2,True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[1,3]==prox2[3,3]), "Ordering problem, {} != {}".format(prox1[1,3],prox2[3,3])
assert (prox1[3,1]==prox2[1,1]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,1])
# Test 3+4: Without row normalization
prox1=nx_second_order_proximity(G,sel1,False, norm_rows=False)
prox2=nx_second_order_proximity(G,sel2,False, norm_rows=False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[0,1]==prox2[0,3]), "Ordering problem, {} != {}".format(prox1[0,1],prox2[0,3])
    assert (prox1[1,2]==prox2[3,2]), "Ordering problem, {} != {}".format(prox1[1,2],prox2[3,2])
prox1=nx_second_order_proximity(G,sel1,True, norm_rows=False)
prox2=nx_second_order_proximity(G,sel2,True, norm_rows=False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
    assert (prox1[1,3]==prox2[3,3]), "Ordering problem, {} != {}".format(prox1[1,3],prox2[3,3])
    assert (prox1[3,1]==prox2[1,1]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,1])
# Test 5+6: Without row normalization, but with batch normalization
prox1=nx_second_order_proximity(G,sel1,False, norm_rows=False, norm_rows_in_sample=True)
prox2=nx_second_order_proximity(G,sel2,False, norm_rows=False, norm_rows_in_sample=True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[0,1]==prox2[0,3]), "Ordering problem, {} != {}".format(prox1[0,1],prox2[0,3])
    assert (prox1[1,2]==prox2[3,2]), "Ordering problem, {} != {}".format(prox1[1,2],prox2[3,2])
prox1=nx_second_order_proximity(G,sel1,True, norm_rows=False, norm_rows_in_sample=True)
prox2=nx_second_order_proximity(G,sel2,True, norm_rows=False, norm_rows_in_sample=True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
    assert (prox1[1,3]==prox2[3,3]), "Ordering problem, {} != {}".format(prox1[1,3],prox2[3,3])
    assert (prox1[3,1]==prox2[1,1]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,1])
# Test 7: Whole network, but return batch order
prox1=nx_second_order_proximity(G,sel1,True, norm_rows=True, to_batch=True)
prox2=nx_second_order_proximity(G,sel2,True, norm_rows=True, to_batch=True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[1,0]==prox2[3,0]), "Ordering problem, {} != {}".format(prox1[1,0],prox2[3,0])
assert (prox1[3,1]==prox2[1,3]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,3])
# Test 8: Whole network, but return batch order, no norm
prox1=nx_second_order_proximity(G,sel1,True, norm_rows=False, to_batch=True)
prox2=nx_second_order_proximity(G,sel2,True, norm_rows=False, to_batch=True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[1,0]==prox2[3,0]), "Ordering problem, {} != {}".format(prox1[1,0],prox2[3,0])
assert (prox1[3,1]==prox2[1,3]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,3])
# Now repeat everything with an undirected graph:
G=G_undir
nodes=np.array(G.nodes)
# Test 1: Ordering when subset proximity
sel1=nodes[[0,1,2,3]]
sel2=nodes[[0,3,2,1]]
prox1=nx_second_order_proximity(G,sel1,False)
prox2=nx_second_order_proximity(G,sel2,False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[0,1]==prox2[0,3]), "Ordering problem, {} != {}".format(prox1[0,1],prox2[0,3])
    assert (prox1[1,2]==prox2[3,2]), "Ordering problem, {} != {}".format(prox1[1,2],prox2[3,2])
# Test 2: Ordering when whole network proximity
prox1=nx_second_order_proximity(G,sel1,True)
prox2=nx_second_order_proximity(G,sel2,True)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
    assert (prox1[1,3]==prox2[3,3]), "Ordering problem, {} != {}".format(prox1[1,3],prox2[3,3])
    assert (prox1[3,1]==prox2[1,1]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,1])
# Test 3+4: Without row normalization
prox1=nx_second_order_proximity(G,sel1,False, norm_rows=False)
prox2=nx_second_order_proximity(G,sel2,False, norm_rows=False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
assert (prox1[0,1]==prox2[0,3]), "Ordering problem, {} != {}".format(prox1[0,1],prox2[0,3])
    assert (prox1[1,2]==prox2[3,2]), "Ordering problem, {} != {}".format(prox1[1,2],prox2[3,2])
prox1=nx_second_order_proximity(G,sel1,True, norm_rows=False)
prox2=nx_second_order_proximity(G,sel2,True, norm_rows=False)
prox1=np.round(prox1,5)
prox2=np.round(prox2,5)
    assert (prox1[1,3]==prox2[3,3]), "Ordering problem, {} != {}".format(prox1[1,3],prox2[3,3])
    assert (prox1[3,1]==prox2[1,1]), "Ordering problem, {} != {}".format(prox1[3,1],prox2[1,1])
# Test 5+6: Without row normalization, but with batch normalization
prox1=nx_second_order_proximity(G,sel1,False, norm_rows=False, norm_rows_in_sample=True)
prox2=nx_second_order_proximity(G,sel2,False, norm_rows=False, norm_rows_in_sample=True)
    prox1=np.round(prox1,5)
from resample import _util as u
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
@pytest.mark.parametrize("method", (0, 1))
def test_rcont_1(method):
m = np.arange(6).reshape((3, 2))
r = np.sum(m, axis=1)
c = np.sum(m, axis=0)
rng = np.random.default_rng(1)
for w in u.rcont(5, r, c, method, rng):
assert_equal(np.sum(w, axis=0), c)
assert_equal(np.sum(w, axis=1), r)
@pytest.mark.parametrize("shape", ((2, 3), (3, 2), (3, 3), (3, 4), (4, 3)))
def test_rcont_2(shape):
m = np.arange(np.prod(shape)).reshape(shape)
r = np.sum(m, axis=1)
c = np.sum(m, axis=0)
# Patefield should give same results if zero row or column is inserted
rng = np.random.default_rng(1)
w1 = u.rcont(5, r, c, 1, rng)
r2 = np.zeros(len(r) + 1)
r2[0] = r[0]
r2[2:] = r[1:]
rng = np.random.default_rng(1)
w2 = u.rcont(5, r2, c, 1, rng)
assert_equal(w2[:, 1, :], 0)
mask = np.ones(w2.shape[1], dtype=bool)
mask[1] = False
assert_equal(w2[:, mask, :], w1)
c2 = np.zeros(len(c) + 1)
c2[0] = c[0]
c2[2:] = c[1:]
rng = np.random.default_rng(1)
w2 = u.rcont(5, r, c2, 1, rng)
assert_equal(w2[:, :, 1], 0)
mask = np.ones(w2.shape[2], dtype=bool)
mask[1] = False
assert_equal(w2[:, :, mask], w1)
rng = np.random.default_rng(1)
w2 = u.rcont(5, r2, c2, 1, rng)
assert_equal(w2[:, 1, 1], 0)
r_mask = np.ones(w2.shape[1], dtype=bool)
r_mask[1] = False
c_mask = np.ones(w2.shape[2], dtype=bool)
c_mask[1] = False
assert_equal(w2[:, r_mask, :][:, :, c_mask], w1)
def test_rcont_bad_method():
m = np.arange(6).reshape(3, 2)
r = np.sum(m, axis=1)
c = np.sum(m, axis=0)
rng = np.random.default_rng(1)
with pytest.raises(ValueError):
u.rcont(5, r, c, 2, rng)
@pytest.mark.parametrize("method", (0, 1))
def test_rcont_bad_input(method):
m = np.arange(6).reshape(3, 2)
r = np.sum(m, axis=1)
c = np.sum(m, axis=0)
    rng = np.random.default_rng(1)
"""
Project: RadarBook
File: ovals_of_cassini_example.py
Created by: <NAME>
On: 7/2/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter04.ui.OvalsOfCassini_ui import Ui_MainWindow
from numpy import linspace, log10, sqrt, sin, cos, imag, real
from scipy.constants import c, Boltzmann as k, pi
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class OvalsOfCassini(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.separation_distance.returnPressed.connect(self._update_canvas)
self.system_temperature.returnPressed.connect(self._update_canvas)
self.bandwidth.returnPressed.connect(self._update_canvas)
self.noise_figure.returnPressed.connect(self._update_canvas)
self.transmit_losses.returnPressed.connect(self._update_canvas)
self.receive_losses.returnPressed.connect(self._update_canvas)
self.peak_power.returnPressed.connect(self._update_canvas)
self.transmit_antenna_gain.returnPressed.connect(self._update_canvas)
self.receive_antenna_gain.returnPressed.connect(self._update_canvas)
self.frequency.returnPressed.connect(self._update_canvas)
self.bistatic_target_rcs.returnPressed.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.axes1 = fig.add_subplot(111)
self.my_canvas = FigureCanvas(fig)
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes an input value.
:return:
"""
# Get the values from the form
separation_distance = float(self.separation_distance.text())
system_temperature = float(self.system_temperature.text())
bandwidth = float(self.bandwidth.text())
noise_figure = float(self.noise_figure.text())
transmit_losses = float(self.transmit_losses.text())
receive_losses = float(self.receive_losses.text())
peak_power = float(self.peak_power.text())
transmit_antenna_gain = float(self.transmit_antenna_gain.text())
receive_antenna_gain = float(self.receive_antenna_gain.text())
frequency = float(self.frequency.text())
bistatic_target_rcs = float(self.bistatic_target_rcs.text())
# Number of points for plotting ovals
number_of_points = 100000
# Parameters for the Cassini ovals equation
        # r ^ 4 + a ^ 4 - 2 a ^ 2 r ^ 2 cos(2 theta) = b ^ 4
# Parameter "a"
a = 0.5 * separation_distance
# Calculate the wavelength (m)
wavelength = c / frequency
# Calculate the bistatic radar range factor
bistatic_range_factor = (peak_power * transmit_antenna_gain * receive_antenna_gain * wavelength ** 2 *
10.0 ** (bistatic_target_rcs / 10.0)) / ((4.0 * pi) ** 3 * k * system_temperature *
bandwidth * 10.0 ** (noise_figure / 10.0)
* transmit_losses * receive_losses)
# Full angle sweep
t = linspace(0, 2.0 * pi, number_of_points)
# Calculate the signal to noise ratio at which a = b
SNR_0 = 10.0 * log10(16.0 * bistatic_range_factor / separation_distance ** 4)
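        # Sketch of where the factor 16 comes from: the ovals degenerate at
        # b = a = L / 2 (L is the transmitter-receiver separation), and since
        # snr = bistatic_range_factor / b ** 4, the crossover value is
        # K / (L / 2) ** 4 = 16 K / L ** 4, matching the expression above.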
# Create the list of signal to noise ratios to plot
SNR = [SNR_0 - 6, SNR_0 - 3, SNR_0, SNR_0 + 3]
# Clear the axes for the updated plot
self.axes1.clear()
# Loop over all the desired signal to noise ratios
for s in SNR:
# Convert to linear units
snr = 10.0 ** (s / 10.0)
# Parameter for Cassini ovals
b = (bistatic_range_factor / snr) ** 0.25
if a > b:
# Calculate the +/- curves
r1 = sqrt(a ** 2 * (cos(2.0 * t) + sqrt(cos(2 * t) ** 2 - 1.0 + (b / a) ** 4)))
r2 = sqrt(a ** 2 * (cos(2.0 * t) - sqrt(cos(2 * t) ** 2 - 1.0 + (b / a) ** 4)))
# Find the correct indices for imaginary parts = 0
i1 = imag(r1) == 0
i2 = imag(r2) == 0
r1 = real(r1)
r2 = real(r2)
# Plot both parts of the curve
label_text = "SNR = {:.1f}".format(s)
self.axes1.plot(r1[i1] * cos(t[i1]), r1[i1] * sin(t[i1]), 'k.', label=label_text)
                self.axes1.plot(r2[i2] * cos(t[i2]), r2[i2] * sin(t[i2]), 'k.', label=label_text)
# -*- coding: utf-8 -*-
# @Time : 2018/12/21 11:12
# @Author : Alan
# @Email : <EMAIL>
# @File : Utils.py
# @Software: PyCharm
import tensorflow as tf
import time
from datetime import timedelta
import numpy as np
from collections import defaultdict
import pickle
import os
from collections import Counter
UNKNOWN = '<UNK>'
PADDING = '<PAD>'
# print tensor shape
def print_shape(varname, var):
"""
:param varname: tensor name
:param var: tensor variable
"""
try:
print('{0} : {1}'.format(varname, var.get_shape()))
except:
print('{0} : {1}'.format(varname, np.shape(var)))
# print log info on SCREEN and LOG file simultaneously
def print_log(*args, **kwargs):
print(*args)
if len(kwargs) > 0:
print(*args, **kwargs)
return None
# print all used hyper-parameters on both SCREEN an LOG file
def print_args(args, log_file):
"""
    :param args: all used hyper-parameters
    :param log_file: the log file
"""
argsDict = vars(args)
argsList = sorted(argsDict.items())
print_log("------------- HYPER PARAMETERS -------------", file = log_file)
for a in argsList:
print_log("%s: %s" % (a[0], str(a[1])), file = log_file)
print("-----------------------------------------", file = log_file)
return None
# time cost
def get_time_diff(startTime):
endTime = time.time()
diff = endTime - startTime
return timedelta(seconds = int(round(diff)))
# count the number of trainable parameters in model
def count_parameters():
totalParams = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variableParams = 1
for dim in shape:
variableParams *= dim.value
totalParams += variableParams
return totalParams
# Cosine similarity computation
def feature2cos_sim(feat_q, feat_a):
# feat_q: 2D:(bz, hz)
norm_q = tf.sqrt(tf.reduce_sum(tf.multiply(feat_q, feat_q), 1))
norm_a = tf.sqrt(tf.reduce_sum(tf.multiply(feat_a, feat_a), 1))
mul_q_a = tf.reduce_sum(tf.multiply(feat_q, feat_a), 1)
cos_sim_q_a = tf.div(mul_q_a, tf.multiply(norm_q, norm_a))
return tf.clip_by_value(cos_sim_q_a, 1e-5, 0.99999)
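# Illustrative numpy check of the cosine-similarity formula above (assumed
# shapes (bz, hz); not part of the TF graph): for q = [1, 0] and a = [1, 1]
# the similarity is 1 / sqrt(2) ~ 0.7071, inside the clip range [1e-5, 0.99999].
#   _q, _a = np.array([[1., 0.]]), np.array([[1., 1.]])
#   _cos = (_q * _a).sum(1) / (np.sqrt((_q * _q).sum(1)) * np.sqrt((_a * _a).sum(1)))
#   assert abs(_cos[0] - 1 / np.sqrt(2)) < 1e-9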
# margin Loss
def cal_loss_and_acc(ori_cand, ori_neg, M):
# the target function
zero = tf.fill(tf.shape(ori_cand), 0.0)
margin = tf.fill(tf.shape(ori_cand), M) # 0.2
    # Use a margin loss: push positive-sample scores up and negative-sample scores down
with tf.name_scope("loss"):
losses = tf.maximum(zero, tf.subtract(margin, tf.subtract(ori_cand, ori_neg)))
loss = tf.reduce_sum(losses)
# cal accurancy
    # Note: this accuracy computation is debatable and may need review
with tf.name_scope("acc"):
correct = tf.equal(zero, losses)
acc = tf.reduce_mean(tf.cast(correct, "float"), name="acc")
return loss, acc
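# Worked example of the margin loss above (illustrative values, M = 0.2):
# with ori_cand = 0.9 and ori_neg = 0.5 the loss is max(0, 0.2 - 0.4) = 0,
# while ori_cand = 0.6 and ori_neg = 0.5 gives max(0, 0.2 - 0.1) = 0.1, so
# only pairs whose score gap falls inside the margin contribute.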
def map_score(qids, labels, preds):
""""""
labels = [int(i) for i in labels]
qid2cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
qid2cand[qid].append((pred, label))
average_precs = []
for qid, candidates in qid2cand.items():
average_prec = 0
running_correct_count = 0
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if label > 0:
running_correct_count += 1
average_prec += float(running_correct_count) / i
average_precs.append(average_prec / (running_correct_count + 1e-6))
map_score = sum(average_precs) / len(average_precs)
return map_score
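# Worked example for map_score (single query, illustrative values): labels
# [1, 0, 1] with preds [0.9, 0.8, 0.7] place the relevant items at ranks 1
# and 3, so AP = (1/1 + 2/3) / 2 ~ 0.8333 (up to the 1e-6 smoothing term).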
def mean_reciprocal_rank(qids, labels, preds):
""""""
labels = [int(i) for i in labels]
qid2cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
qid2cand[qid].append((pred, label))
mrr = 0.
for qid, candidates in qid2cand.items():
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if label > 0:
mrr += 1. / i
break
mrr /= len(qid2cand)
return mrr
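# Worked example for mean_reciprocal_rank: a query whose first relevant
# answer lands at rank 2 after sorting by score contributes 1/2; the final
# MRR is the mean of these reciprocal ranks over all queries.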
def precision_at_k(qids, labels, preds, k=1):
""""""
labels = [int(i) for i in labels]
qid2cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
qid2cand[qid].append((pred, label))
p_at_k = 0.0
good_qids = []
for qid, candidates in qid2cand.items():
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if i == k and label > 0:
p_at_k += 1.
good_qids.append(qid)
if i > k:
break
p_at_k /= len(qid2cand)
return p_at_k
def new_map_score(labels, preds):
"""
:param labels: (questions_classes, list_wise) ----> (873, 15)
:param preds: (questions_classes, list_wise) ----> (873, 15)
:return:
"""
assert len(preds) == len(labels), "Invalid Input! pred:%s label:%s" % (len(preds), len(labels))
qid2cand = defaultdict(list)
for qid in range(len(labels)):
qid2cand[qid] = zip(preds[qid], labels[qid])
average_precs = []
for qid, candidates in qid2cand.items():
average_prec = 0
running_correct_count = 0
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if label > 0:
running_correct_count += 1
average_prec += float(running_correct_count) / i
average_precs.append(average_prec / (running_correct_count + 1e-6))
map_score = sum(average_precs) / len(average_precs)
return map_score
def new_mean_reciprocal_rank(labels, preds):
"""
:param labels: (questions_classes, list_wise) ----> (873, 15)
:param preds: (questions_classes, list_wise) ----> (873, 15)
:return:
"""
assert len(preds) == len(labels), "Invalid Input! pred:%s label:%s" % (len(preds), len(labels))
qid2cand = defaultdict(list)
for qid in range(len(labels)):
qid2cand[qid] = zip(preds[qid], labels[qid])
mrr = 0.
for qid, candidates in qid2cand.items():
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if label > 0:
mrr += 1. / i
break
mrr /= len(qid2cand)
return mrr
def new_precision_at_k(labels, preds, k=1):
"""
:param labels: (questions_classes, list_wise) ----> (873, 15)
:param preds: (questions_classes, list_wise) ----> (873, 15)
:return:
"""
assert len(preds) == len(labels), "Invalid Input! pred:%s label:%s" % (len(preds), len(labels))
qid2cand = defaultdict(list)
for qid in range(len(labels)):
qid2cand[qid] = zip(preds[qid], labels[qid])
p_at_k = 0.0
good_qids = []
for qid, candidates in qid2cand.items():
for i, (score, label) in enumerate(sorted(candidates, key=lambda asd: asd[0], reverse=True), 1):
if i == k and label > 0:
p_at_k += 1.
good_qids.append(qid)
if i > k:
break
p_at_k /= len(qid2cand)
return p_at_k
if __name__ == '__main__':
print(new_map_score(np.array([[0, 0, 1], [0, 0, 1]]), np.array([[0.8, 0.1, 0.1], [0.4, 0.3, 0.3]])))
    print(new_map_score(np.array([[0, 0, 1], [0, 0, 1]]), np.array([[-0.8, -0.1, -0.1], [-0.4, -0.2, -0.2]])))
# %% [markdown]
# # Load and import
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.plot import gridplot, heatmap, pairplot
from graspy.simulations import sbm
from graspy.utils import binarize, get_lcc, is_fully_connected
from src.data import load_everything
from src.hierarchy import normalized_laplacian, signal_flow
from src.utils import savefig
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
SAVEFIGS = True
DEFAULT_FMT = "png"
DEFUALT_DPI = 150
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=1)
def stashfig(name, **kws):
if SAVEFIGS:
savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFUALT_DPI, **kws)
# %% [markdown]
# # null simulation
def get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=5):
B = np.zeros((n_blocks, n_blocks))
B += low_p
B -= np.diag(np.diag(B))
B -= np.diag(np.diag(B, k=1), k=1)
B += np.diag(diag_p * np.ones(n_blocks))
B += np.diag(feedforward_p * np.ones(n_blocks - 1), k=1)
return B
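# Quick sanity check (illustrative probabilities): get_feedforward_B(0.01,
# 0.5, 0.3) returns a 5 x 5 block-probability matrix with 0.5 on the
# diagonal, 0.3 on the first superdiagonal (the feedforward edges) and 0.01
# everywhere else.
#   B = get_feedforward_B(0.01, 0.5, 0.3)
#   assert B.shape == (5, 5) and B[0, 0] == 0.5 and B[0, 1] == 0.3 and B[1, 0] == 0.01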
# block_probs = get_feedforward_B(low_p, diag_p, feedforward_p)
def get_recurrent_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=5):
alternating = np.ones(2 * n_blocks - 1)
alternating[1::2] = 0
    late_alternating = np.ones(2 * n_blocks - 1)
#!/usr/bin/env python3
#General Packages
import numpy as np
import matplotlib.pyplot as plt
#Packages to make life easier
import os
from tqdm import tqdm
def find_adjacent(pt, skeleton):
pt_list = []
if skeleton[pt[0]-1][pt[1]-1] == 1:
pt_list.append((pt[0]-1,pt[1]-1))
if skeleton[pt[0]-1][pt[1]] == 1:
pt_list.append((pt[0]-1,pt[1]))
if skeleton[pt[0]-1][pt[1]+1] == 1:
pt_list.append((pt[0]-1, pt[1]+1))
if skeleton[pt[0]][pt[1]-1] == 1:
pt_list.append((pt[0], pt[1]-1))
if skeleton[pt[0]][pt[1]+1] == 1:
pt_list.append((pt[0], pt[1]+1))
if skeleton[pt[0]+1][pt[1]-1] == 1:
pt_list.append((pt[0]+1, pt[1]-1))
if skeleton[pt[0]+1][pt[1]] == 1:
pt_list.append((pt[0]+1, pt[1]))
if skeleton[pt[0]+1][pt[1]+1] == 1:
pt_list.append((pt[0]+1, pt[1]+1))
return pt_list
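# Minimal sketch of find_adjacent on an assumed 5x5 binary skeleton: the
# 8-neighbourhood of (2, 2) is scanned in row-major order, so only the "on"
# pixels around it come back, in that order.
#   _skel = np.zeros((5, 5), dtype=int)
#   _skel[1, 2] = _skel[2, 3] = 1
#   assert find_adjacent((2, 2), _skel) == [(1, 2), (2, 3)]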
def generate_node_connections(node_pts):
node_num = len(node_pts[0])
connection_mtx = np.zeros((node_num, node_num), dtype=int)
#Generate matrix of connection lengths, 0 is no connection
for i, pt in enumerate(node_pts[0]):
path_list = find_adjacent(pt, skeleton)
for p, path_start in enumerate(path_list):
cur_p = path_start
last_p = pt
path_length = 1
while cur_p not in node_pts[0]:
paths = find_adjacent(cur_p, skeleton)
if len(paths) > 2:
print("This point has too many adjacent points")
break
paths.remove(last_p)
last_p = cur_p
cur_p = paths[0]
path_length+=1
node_idx = node_pts[0].index(cur_p)
connection_mtx[i][node_idx] = path_length
# print('\n'.join([''.join(['{:3}'.format(item) for item in row]) for row in connection_mtx]))
#Convert from matrix to list of connections and lengths
node_connection_list = [[],[],[],[]]
for i, pt in enumerate(node_pts[0]):
        nz = np.nonzero(connection_mtx[i])
import sys
sys.path
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from tqdm import tqdm_notebook
import json
import xlrd
import torch
import torchvision
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
from torchvision.utils import make_grid
from sklearn.metrics import roc_curve, auc, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
class ECG_Net(nn.Module):
def __init__(self, init_weights= True, num_classes=26, cfg = None):
super(ECG_Net, self).__init__()
if cfg is None:
cfg = [32, 32,32, 64,64,64, 128, 128, 128, 256, 256, 256, 512, 512, 512]
inchannel = 12
self.pre_conv = nn.Sequential(
nn.Conv1d(inchannel, cfg[0], kernel_size=21, stride= 2, padding=10, bias=True),
nn.BatchNorm1d(cfg[0]),
nn.ReLU(),
nn.Conv1d(cfg[0], cfg[1], kernel_size=21, stride= 2, padding=10, bias=True),
nn.BatchNorm1d(cfg[1]),
nn.ReLU(),
nn.Conv1d(cfg[1], cfg[2], kernel_size=21, stride= 2, padding=10, bias=True),
nn.BatchNorm1d(cfg[2]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2)
)
self.stage1 = nn.Sequential(
nn.Conv1d(cfg[2], cfg[3], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[3]),
nn.ReLU(),
nn.Conv1d(cfg[3], cfg[4], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[4]),
nn.ReLU(),
nn.Conv1d(cfg[4], cfg[5], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[5]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2)
)
self.stage2 = nn.Sequential(
nn.Conv1d(cfg[5], cfg[6], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[6]),
nn.ReLU(),
nn.Conv1d(cfg[6], cfg[7], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[7]),
nn.ReLU(),
nn.Conv1d(cfg[7], cfg[8], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[8]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2)
)
self.stage3 = nn.Sequential(
nn.Conv1d(cfg[8], cfg[9], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[9]),
nn.ReLU(),
nn.Conv1d(cfg[9], cfg[10], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[10]),
nn.ReLU(),
nn.Conv1d(cfg[10], cfg[11], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[11]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2)
)
self.stage4 = nn.Sequential(
nn.Conv1d(cfg[11], cfg[12], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[12]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(cfg[12], cfg[13], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[13]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(cfg[13], cfg[14], kernel_size=5, stride= 1, padding=2, bias=True),
nn.BatchNorm1d(cfg[14]),
nn.ReLU(),
nn.MaxPool1d(2, stride=2)
)
self.avg_pool = nn.AvgPool1d(2, stride=2)
self.dense1 =nn.Sequential(
nn.Linear(cfg[14]*2, 512, bias = True),
nn.ReLU(),
nn.Dropout(p=0.6)
)
self.dense2 =nn.Sequential(
nn.Linear(512, 512, bias = True),
nn.ReLU(),
nn.Dropout(p=0.6)
)
self.classifer = nn.Sequential(
nn.Linear(512, num_classes, bias = True),
nn.Sigmoid()
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = torch.squeeze(x, dim=1)
x = torch.transpose(x, 1, 2)
out = self.pre_conv(x)
out = self.stage1(out)
out = self.stage2(out)
out = self.stage3(out)
out = self.stage4(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.dense1(out)
out = self.dense2(out)
out = self.classifer(out)
return out
def _initialize_weights(self):
for m in self.modules():
            if isinstance(m, nn.Conv1d):  # the model only uses 1-D convolutions
                nn.init.constant_(m.bias, 0)
                nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
nn.init.xavier_normal_(m.weight)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ECG_Net().to(device)
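# Shape sanity check (illustrative; the sequence length must survive the
# eleven stride-2 stages, so e.g. T = 4096 leaves length 2 before flattening,
# matching the cfg[14] * 2 = 1024 input expected by dense1):
#   x = torch.randn(2, 1, 4096, 12)   # (batch, 1, time, leads)
#   y = model(x.to(device))           # squeezed/transposed to (2, 12, 4096)
#   assert y.shape == (2, 26) and bool((y >= 0).all()) and bool((y <= 1).all())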
def train_model(model,train_loader,val_loader,batch_size, num_epochs,loss_fun,optimizer):
# to track the training loss as the model trains
train_losses = []
# to track the validation loss as the model trains
valid_losses = []
# to track the average training loss per epoch as the model trains
avg_train_losses = []
# to track the average validation loss per epoch as the model trains
avg_valid_losses = []
for epoch in tqdm_notebook(range(1, num_epochs + 1)):
if epoch in [10,20,30,40,50]:
for param_group in optimizer.param_groups:
param_group['lr']*=0.1
train_running_precision = 0.0
train_running_recall = 0.0
val_running_precision = 0.0
val_running_recall = 0.0
train_running_acc = 0
val_running_acc = 0
train_batch_num = 0
val_batch_num = 0
###################
# train the model #
###################
model.train() # model for training
for batch_idx, (data, label) in tqdm_notebook(enumerate(train_loader)):
data, label = data.float().to(device), label.to(device)
data, target = Variable(data), Variable(label)
optimizer.zero_grad()
output = model(data)
loss = loss_fun(output, label)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=5, norm_type=2)
optimizer.step()
train_losses.append(loss.item())
train_batch_num +=1
######################
# validate the model #
######################
pre_result = []
label_tmp = []
model.eval() # prep model for evaluation
with torch.no_grad():
for batchsize,(data, label) in tqdm_notebook(enumerate(val_loader)):
data, label = data.float().to(device), label.to(device)
data, target = Variable(data), Variable(label)
output = model(data)
loss = loss_fun(output, label).item() # sum up batch loss
valid_losses.append(loss)
pre_result.append(output)
label_tmp.append(label)
pred = torch.cat(pre_result,dim=0)
label = torch.cat(label_tmp,dim=0)
best_thres=np.array([0.8, 0.8, 0.64, 0.76 , 0.75 ,0.61, 0.71, 0.78, 0.47, 0.8, 0.49, 0.85, 0.57,
0.32 , 0.68, 0.46, 0.22, 0.83, 0.87 ,0.11, 0.52, 0.58, 0.85, 0.43 , 0.75, 0.33 ])
best_thres = torch.from_numpy(best_thres)
acc_result = torch.ge(pred.cpu(), best_thres)
acc_result = acc_result+0
acc = accuracy_score(label.cpu(), acc_result.cpu(),normalize = True)
train_loss = np.average(train_losses)
        valid_loss = np.average(valid_losses)
import numpy as np
from scipy.spatial import Delaunay
from sklearn.linear_model import LinearRegression
from collections import defaultdict
from math import inf
def closest_points(point, points, k):
K = []
for i in range(len(points)):
point2 = [points[i][0][0], points[i][1][0]] # points[i][0] + points[i][1]
dist = np.linalg.norm(np.array(point) - np.array(point2))
if dist < k:
K.append(point2)
return K
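# Illustrative use of closest_points (assumed layout: each entry of `points`
# is a pair of one-element arrays): only points within distance k are kept.
#   _pts = [(np.array([0.0]), np.array([0.0])), (np.array([5.0]), np.array([5.0]))]
#   assert closest_points([0.1, 0.1], _pts, k=1.0) == [[0.0, 0.0]]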
def in_hull(p, hull):
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p) >= 0
def LWR(X, Q, weights, point):
y = np.zeros((len(X), len(point)))
for i in range(len(X)-1):
y[i] = Q[X[i][0]][X[i][1]]
regr = LinearRegression() # need to use another function
regr.fit(X, y, sample_weight=weights)
return regr.predict(point)
def find_max_q(env, alpha, gamma, h, k_threshold, Q, e,):
qmax = [-inf, -inf]
actions = np.linspace(env.min_action, env.max_action, 1000)
for a in actions:
qnew = hedger_prediction(env.state, a, h, k_threshold, env, alpha, gamma, Q, e)
if np.all(qmax < qnew):
qmax = qnew
return qmax
def hedger_prediction(state, action, h, k_threshold, env, alpha, gamma, Q, e):
    q = np.array(state + action)
import os
import cv2
import numpy as np
import tensorflow as tf
import random
import math
from config import cfg
from data_augmentation import get_training_patches, check_for_TTA, generate_TTA_tiles
ratio_threshold = [0.25, 0.2, 0.15, 0.1, 0.05]
data_path = cfg.data_path
image_folder = 'images'
gt_folder = 'ground_truth'
log_path = '../log'
mean_r, mean_g, mean_b = -1, -1, -1
def get_mean_color_values():
print("Calculating mean colors for training dataset!!")
img_path = data_path + '/train/' + image_folder + '/'
r_channels = []
g_channels = []
b_channels = []
for file in os.listdir(img_path):
img = cv2.imread(img_path + file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
r_channels.append(img[:,:,0])
g_channels.append(img[:,:,1])
b_channels.append(img[:,:,2])
mean_r = np.mean(np.array(r_channels))
mean_g = np.mean(np.array(g_channels))
mean_b = np.mean(np.array(b_channels))
print("mean values for - R: {0}, G: {1}, B: {2}".format(mean_r, mean_g, mean_b))
return mean_r, mean_g, mean_b
if cfg.compute_mean_rgb and mean_r == -1 and mean_g == -1 and mean_b == -1:
mean_r, mean_g, mean_b = get_mean_color_values()
else:
mean_r, mean_g, mean_b = 103.61419795694658, 109.0776216765373, 100.39708955555578
def get_num_patches_per_tile(height, width, training_stride=cfg.patch_size//2):
total_width = width + (2 * cfg.train_tile_padding)
num_patches_width = int((total_width-cfg.patch_size)/training_stride)
total_height = height + (2 * cfg.train_tile_padding)
num_patches_height = int((total_height-cfg.patch_size)/training_stride)
return num_patches_height, num_patches_width
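# Worked example (illustrative config values): with patch_size = 256, a
# 512 x 512 tile and train_tile_padding = 64, the padded tile is 640 pixels
# on each side, so each direction yields int((640 - 256) / 128) = 3 patches
# at the default half-patch stride.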
def load_training_data(epoch):
batch_size = cfg.batch_size
patch_size = cfg.patch_size
path = data_path
img_path = data_path + '/train/' + image_folder + '/'
gt_path = data_path + '/train/' + gt_folder + '/'
num_building_px = 0
total_num_px = 0
image_files = os.listdir(img_path)
random.shuffle(image_files)
if len(image_files) % cfg.image_tile_per_mini_epoch == 0:
call_count = len(image_files) // cfg.image_tile_per_mini_epoch
else:
call_count = (len(image_files) // cfg.image_tile_per_mini_epoch) + 1
#call_count is the number of times the loop will iterate to get all patches for 1 epoch
yield call_count
height, width, _ = cv2.imread(img_path + image_files[0]).shape
stride = cfg.patch_size//2
num_patches_height, num_patches_width = get_num_patches_per_tile(height, width, stride)
num_patches_per_tile = num_patches_height * num_patches_width
train_count = num_patches_per_tile * cfg.image_tile_per_mini_epoch
train_count -= train_count % cfg.batch_size
val_count = 10 * cfg.batch_size
num_patches_per_img = num_patches_per_tile
x_list = []
y_list = []
if cfg.patch_generation == 'sequential' or (cfg.patch_generation == 'alternating' and epoch%2==0):
for i in range(num_patches_width):
x_list += [i * stride]
x_list *= num_patches_height
for i in range(num_patches_height):
y_list += [i * stride] * num_patches_width
########### to shuffle the list of x and corresponding y positions ############
shuffled_index = random.sample(range(0,len(x_list)), len(x_list))
x_list_new = []
y_list_new = []
for position_index in shuffled_index:
x_list_new.append(x_list[position_index])
y_list_new.append(y_list[position_index])
x_list = x_list_new
y_list = y_list_new
else:
# x_list, y_list = np.random.randint(cfg.tile_width + (2 * cfg.tile_padding)-cfg.patch_size, size = (2, num_patches_per_img))
x_list = np.random.randint(width + (2 * cfg.train_tile_padding)-cfg.patch_size, size = (num_patches_per_img))
y_list = np.random.randint(height + (2 * cfg.train_tile_padding)-cfg.patch_size, size = (num_patches_per_img))
for call_num in range(call_count):
img_tiles = []
gt_tiles = []
for i in range(cfg.image_tile_per_mini_epoch):
if len(image_files) == 0:
break
image_file = image_files.pop()
img = cv2.imread(img_path + image_file)
img = cv2.copyMakeBorder(img, cfg.train_tile_padding, cfg.train_tile_padding, cfg.train_tile_padding, cfg.train_tile_padding, cv2.BORDER_REFLECT_101)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_tiles.append(img)
gt = cv2.imread(gt_path + image_file, 0)
gt = cv2.copyMakeBorder(gt, cfg.train_tile_padding, cfg.train_tile_padding, cfg.train_tile_padding, cfg.train_tile_padding, cv2.BORDER_REFLECT_101)
gt_tiles.append(gt)
images, ground_truths = get_training_patches(img_tiles, gt_tiles, x_list, y_list, mean_r, mean_g, mean_b)
images = np.array(images).astype(np.float32) / 255.
ground_truths = np.array(ground_truths).astype(np.float32) / 255.
        # The else block drops batches whose building-to-total pixel ratio is below the threshold for the first 5 epochs when maintain_ratio is set to True
if not cfg.maintain_ratio or epoch > 4:
trX = images[:train_count]
trY = ground_truths[:train_count]
valX = images[-val_count:]
valY = ground_truths[-val_count:]
trY = trY.reshape((-1, cfg.patch_size, cfg.patch_size, 1))
valY = valY.reshape((val_count, cfg.patch_size, cfg.patch_size, 1))
num_tr_batch = trY.shape[0] // cfg.batch_size
num_val_batch = val_count // cfg.batch_size
else:
threshold = ratio_threshold[epoch]
special_batch_images = []
special_batch_gt = []
for batch_index in range(0, images.shape[0], cfg.batch_size):
check_batch = np.array(ground_truths[batch_index:batch_index+cfg.batch_size]).astype(np.uint8)
building_px_count = np.count_nonzero(check_batch)
total_px_count = check_batch.size
if building_px_count/total_px_count >= threshold:
# for batch in range(cfg.batch_size):
special_batch_images.extend(images[batch_index : batch_index+cfg.batch_size])
special_batch_gt.extend(ground_truths[batch_index : batch_index+cfg.batch_size])
special_train_count = len(special_batch_images)
special_val_count = 2 * cfg.batch_size
images = np.array(special_batch_images)
ground_truths = np.array(special_batch_gt)
trX = images[:special_train_count]
trY = ground_truths[:special_train_count]
valX = images[-special_val_count:]
valY = ground_truths[-special_val_count:]
trY = trY.reshape((special_train_count, cfg.patch_size, cfg.patch_size, 1))
valY = valY.reshape((special_val_count, cfg.patch_size, cfg.patch_size, 1))
num_tr_batch = special_train_count // cfg.batch_size
num_val_batch = special_val_count // cfg.batch_size
yield trX, trY.astype(np.uint8), num_tr_batch, valX, valY.astype(np.uint8), num_val_batch
num_building_px += np.count_nonzero(trY)
total_num_px += trY.size
building_total_ratio = round((num_building_px / total_num_px) * 100, 2)
print("epoch: {0}, building to total pixels ratio: {1}%".format(epoch, building_total_ratio))
def get_patch_weights(patch_size=cfg.test_patch_size):
choice = 1
if choice == 0:
step_size = (1.0 - 0.5)/(patch_size/2)
a = np.arange(1.0, 0.5, -step_size)
b = a[::-1]
c = np.concatenate((b,a))
ct = c.reshape(-1,1)
x = ct*c
return x
elif choice == 1:
min_weight = 0.5
step_count = patch_size//4
step_size = (1.0 - min_weight)/step_count
a = np.ones(shape=(patch_size,patch_size), dtype=np.float32)
a = a * min_weight
for i in range(1, step_count + 1):
a[i:-i, i:-i] += step_size
a = cv2.GaussianBlur(a,(5,5),0)
return a
else:
a = np.ones(shape=(patch_size,patch_size), dtype=np.float32)
return a
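# Sketch of the choice == 1 ramp for an assumed patch_size of 8: step_count
# is 2 and step_size is 0.25, so the border ring sits at 0.5, the next ring
# at 0.75 and the 4 x 4 centre at 1.0 before the Gaussian blur softens the
# steps; blended patches therefore contribute most near their centres.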
def get_image_path():
if cfg.is_training:
test_img_path = data_path + '/validation/' + image_folder + '/'
else:
test_img_path = data_path + '/test/' + image_folder + '/'
tta_status = check_for_TTA(test_img_path)
if cfg.test_time_augmentation and not tta_status:
generate_TTA_tiles(test_img_path, test_img_path)
# files = [f for f in os.listdir(test_img_path) if f.endswith('.tif')]
files = [f for f in os.listdir(test_img_path)]
file_count = len(files)
yield file_count
for fi in files:
yield str(test_img_path + '/' + fi)
def get_image_patches(img_path, patch_size=cfg.test_patch_size, batch_size=cfg.batch_size, batch_count=250, stride=cfg.test_patch_size//2, log_path='../log'):
img = cv2.imread(img_path)
img = cv2.copyMakeBorder(img, cfg.test_tile_padding, cfg.test_tile_padding, cfg.test_tile_padding, cfg.test_tile_padding, cv2.BORDER_REFLECT_101)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
##Segment to get mean subtracted image
mean_adjusted_img_r = img[:,:,0] - mean_r
mean_adjusted_img_g = img[:,:,1] - mean_g
mean_adjusted_img_b = img[:,:,2] - mean_b
img = cv2.merge([mean_adjusted_img_r, mean_adjusted_img_g, mean_adjusted_img_b]).astype(np.float32)
img = img / 255.
x, y, z = img.shape
yield x, y
call_count = math.ceil((((x - patch_size)/stride + 1)*((y - patch_size)/stride + 1))/(batch_size*batch_count))
yield call_count
# print('call_count: {0}'.format(call_count))
    img_map = np.zeros((x, y), dtype=np.float32)
from __future__ import print_function
from optparse import OptionParser
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import pymaster as nmt
import os
import sys
DTOR=np.pi/180
def opt_callback(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
parser = OptionParser()
parser.add_option('--nside', dest='nside_out', default=512, type=int,
help='Resolution parameter')
parser.add_option('--isim-ini', dest='isim_ini', default=1, type=int,
help='Index of first simulation')
parser.add_option('--isim-end', dest='isim_end', default=100, type=int,
help='Index of last simulation')
parser.add_option('--wo-contaminants', dest='wo_cont', default=False, action='store_true',
help='Set if you don\'t want to use contaminants')
parser.add_option('--plot', dest='plot_stuff', default=False, action='store_true',
help='Set if you want to produce plots')
parser.add_option('--wo-pureb', dest='wo_pureb', default=False, action='store_true',
help='Set if you don\'t want to purify B-modes')
parser.add_option('--no-deproject',dest='no_deproject',default=False,action='store_true',
help='Set if you will include contaminants but won\'t clean them')
parser.add_option('--no-debias',dest='no_debias',default=False,action='store_true',
help='Set if you will include contaminants, clean them but won\'t correct for the bias')
(o, args) = parser.parse_args()
nsims=o.isim_end-o.isim_ini+1
w_cont=not o.wo_cont
w_pureb=not o.wo_pureb
#Create output directory
predir="tests_sph"
os.system("mkdir -p "+predir)
prefix=predir+"/run_pure0%d_ns%d_cont%d"%(w_pureb,o.nside_out,w_cont)
if o.no_deproject :
prefix+="_no_deproj"
if o.no_debias :
prefix+="_no_debias"
#Read theory power spectra
def read_cl_camb(fname) :
data=np.loadtxt(fname,unpack=True)
ll=np.arange(3*o.nside_out,dtype=float)
fac=2*np.pi/(ll[2:]*(ll[2:]+1.))
cl_tt=np.zeros_like(ll); cl_tt[2:]=data[1,:3*o.nside_out-2]*fac
cl_ee=np.zeros_like(ll); cl_ee[2:]=data[2,:3*o.nside_out-2]*fac
cl_bb=np.zeros_like(ll); cl_bb[2:]=data[3,:3*o.nside_out-2]*fac
cl_te=np.zeros_like(ll); cl_te[2:]=data[4,:3*o.nside_out-2]*fac
return ll,cl_tt,cl_ee,cl_bb,cl_te
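# Unit sketch: CAMB tables store D_ell = ell * (ell + 1) * C_ell / (2 * pi),
# so `fac` above converts back to raw C_ell; at ell = 2 the factor is
# 2 * pi / 6 ~ 1.047, and the monopole/dipole entries are left at zero.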
l,cltt,clee,clbb,clte=read_cl_camb("data/cls_cmb.txt")
#Noise power spectrum
nlev=(1.*np.pi/(180*60))**2 #1 uK-arcmin noise level
nltt=nlev*(np.ones_like(l)+(25./(l+0.1))**2.4) #1/ell noise with a knee scale of ell=25 (optimistic)
nlee=2*nltt; nlbb=2*nltt; nlte=0*nltt
#Beam
fwhm_amin=20. #Corresponding to 0.5m aperture at 90GHz
beam=np.exp(-0.5*l*(l+1)*(fwhm_amin*np.pi/(180*60*2.355))**2)
#Read mask
mask=hp.read_map("data/mask_cmb_ns%d.fits"%o.nside_out,verbose=False)
fsky=np.mean(mask/np.amax(mask))
import h5py
import numpy as np
from pyquaternion import Quaternion
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Generate new IMU measurements for all trajectories in the given hdf5 file')
parser.add_argument('--hdf5_path', type=str, help='path to hdf5 file',required=True)
args = parser.parse_args()
if __name__ == '__main__':
answer = str(input("Warning: this script will overwrite IMU measurements stored in the given hdf5 dataset. \n"+ \
"Do you want to proceed? (y/n): "))
if not(answer=="y" or answer=="Y"):
sys.exit(0)
database = h5py.File(args.hdf5_path, "a")
db_path = os.path.dirname(args.hdf5_path)
# IMU noise parameters chosen randomly in a range of values encountered in real devices
noise_acc = 2 * np.power(10., -np.random.uniform(low=1., high=3., size=(1, 3)))
noise_gyr = np.power(10., -np.random.uniform(low=1., high=3., size=(1, 3)))
imu_bias_acc_rw = 2 * np.power(10., -np.random.uniform(low=3., high=6., size=(1, 3)))
imu_bias_gyr_rw = np.power(10., -np.random.uniform(low=4., high=6., size=(1, 3)))
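    # Rough magnitudes implied by the draws above (hedged, since they are
    # random): noise_acc spans ~0.002-0.2 and noise_gyr ~0.001-0.1, while the
    # bias random walks are two to three orders of magnitude smaller, broadly
    # consistent with consumer-grade MEMS IMUs.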
for dataset in database:
print("Currently processing : %s" % dataset)
gt_group = database[dataset]["groundtruth"]
gt_attitude = gt_group["attitude"]
gt_angular_vel = gt_group["angular_velocity"]
gt_accelerations = gt_group["acceleration"]
imu_group = database[dataset]["imu"]
# Set init parameters
imu_accelerometer = np.zeros(gt_attitude.shape, dtype=float)
imu_gyroscope = np.zeros(gt_attitude.shape, dtype=float)
imu_bias_acc = np.random.normal([0., 0., 0.], imu_bias_acc_rw)
imu_bias_gyr = np.random.normal([0., 0., 0.], imu_bias_gyr_rw)
init_bias_est_acc = imu_bias_acc + np.random.normal([0., 0., 0.], noise_acc / 50)
init_bias_est_gyr = imu_bias_gyr + np.random.normal([0., 0., 0.], noise_gyr / 50)
imu_group["accelerometer"].attrs["init_bias_est"] = init_bias_est_acc
imu_group["gyroscope"].attrs["init_bias_est"] = init_bias_est_gyr
# Pass over trajectory to generate simulated sensor measurements
for i in range(gt_attitude.shape[0]):
attitude = Quaternion(gt_attitude[i, :])
imu_accelerometer = attitude.conjugate.rotate(gt_accelerations[i, :] + np.array([0., 0., -9.81])) \
+ imu_bias_acc + np.random.normal([0., 0., 0.], noise_acc)
            imu_gyroscope = gt_angular_vel[i, :] + imu_bias_gyr + np.random.normal([0., 0., 0.], noise_gyr)
"""Implementation of a variety of regression predictors."""
import numpy as np
import scipy
import sklearn.metrics
from PIL import Image
class Predictor():
"""Base class for predictors.
Parameters
----------
zs_train : list
List of training observations.
ys_train : list
List of training measurements.
online : boolean
Flag to determine whether prediction should incorporate
basic memory.
"""
def __init__(self, zs=[], ys=[], online=False):
"""Create a predictor."""
self.zs_train = zs
self.ys_train = ys
if online:
self.prev_pred = np.zeros_like(ys[0])
self.online = online
def add_data(self, zs, ys):
"""Add data to the predictor.
Parameters
----------
zs : list
Observations to add.
ys : list
Corresponding labels.
"""
assert len(zs) == len(ys)
self.zs_train += zs
self.ys_train += ys
def pred(self, zs):
"""Prediction function.
Parameters
----------
zs : list
New observations.
Returns
-------
preds : list
Predicted labels.
"""
preds, _ = self.compute_pred(zs)
if self.online:
if np.linalg.norm(preds[-1]) < 1e-6:
print('using prev pred!')
preds[-1] = self.prev_pred
else:
                self.prev_pred = preds[-1]  # remember the last confident prediction
return preds
class KernelPredictor(Predictor):
"""Nonparametric Nadarya-Watson kernel estimator.
y_hat = sum_t y_t * Ind(d(z_t, z) < gamma)
Parameters
----------
distance : str
The type of distance metric to use. Currently only l2 is implemented.
gamma: float
Bandwidth parameter.
transform : str
The type of transformation to perform on observations before
computing the distance.
"""
def __init__(self, zs=[], ys=[], distance='l2', gamma=1,
transform='identity', online=False):
        super().__init__(zs=zs, ys=ys, online=online)
self.zs_train_arr = None
distance_dict = {'l2': self._default_distance}
transform_dict = {'identity': self._default_transform,
'resnet': self._resnet_transform,
'sift': self._sift_transform,
'canny': self._canny_transform,
'hog': self._hog_transform,
'gaussian': self._gaussian_transform}
self.distance = distance_dict[distance]
self.transform = transform_dict[transform]
self.gamma = gamma
def _default_distance(self, x, y):
return sklearn.metrics.pairwise.euclidean_distances(x, y)
def _default_transform(self, zs):
return np.array(zs).reshape(len(zs), -1)
def _sift_transform(self, zs):
import cv2
n_features = 10
sift = cv2.xfeatures2d.SIFT_create(n_features)
vecs = []
for z in zs:
if len(z.shape) < 3:
rgb_arr = 255*(np.array([z]*3).T)
else:
rgb_arr = z[:, :, :]
_, descriptors = sift.detectAndCompute(np.uint8(rgb_arr), None)
vecs.append(descriptors[:10].flatten())
return np.array(vecs)
def _canny_transform(self, zs):
import cv2
vecs = []
for z in zs:
if len(z.shape) < 3:
rgb_arr = 255*(np.array([z]*3).T)
else:
rgb_arr = z[:, :, :]
edges = cv2.Canny(np.uint8(rgb_arr), 100, 200)
vecs.append(edges.flatten())
return np.array(vecs)
def _gaussian_transform(self, zs):
import skimage
vecs = []
for z in zs:
if len(z.shape) < 3:
rgb_arr = 255*(np.array([z]*3).T)
else:
rgb_arr = z[:, :, :]
transform = skimage.filters.gaussian(rgb_arr, sigma=2)
vecs.append(transform.flatten())
return np.array(vecs)
def _hog_transform(self, zs):
from skimage.feature import hog
vecs = []
for z in zs:
if len(z.shape) < 3:
rgb_arr = 255*(np.array([z]*3).T)
else:
rgb_arr = z[:, :, :]
_, hog_img = hog(rgb_arr, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualize=True, multichannel=True)
vecs.append(hog_img.flatten())
return np.array(vecs)
def _resnet_transform(self, zs):
from img2vec_pytorch import Img2Vec
img2vec = Img2Vec(cuda=False)
img_list = []; vecs = []
for z in zs:
if len(z.shape) < 3:
rgb_arr = 255*(np.array([z]*3).T)
else:
rgb_arr = z
img_list.append(Image.fromarray(np.uint8(rgb_arr)))
vecs += [img2vec.get_vec(img_list[-1], tensor=False)]
return np.vstack(vecs)
def add_data(self, zs, ys):
super().add_data(zs=zs, ys=ys)
self.zs_train_arr = None
def compute_pred(self, zs, param_list=None):
"""Compute predictions.
Parameters
----------
zs : list
New observations.
param_list: list, optional
List of alternate hyperparameters to try.
Returns
-------
preds : list or dict
Predicted measurements.
sTs : list or dict
Coverage terms.
"""
if self.zs_train_arr is None: # lazy updates
self.zs_train_arr = self.transform(self.zs_train)
zs = self.transform(zs)
        distances = self.distance(np.array(self.zs_train_arr), zs)
from __future__ import print_function, absolute_import, division
import contextlib
import sys
import numpy as np
import random
import threading
import gc
from numba import unittest_support as unittest
from numba.errors import TypingError
from numba import config
from numba import njit
from numba import types
from numba import utils
from numba.numpy_support import version as numpy_version
from .support import MemoryLeakMixin, TestCase, tag
nrtjit = njit(_nrt=True, nogil=True)
def np_concatenate1(a, b, c):
return np.concatenate((a, b, c))
def np_concatenate2(a, b, c, axis):
return np.concatenate((a, b, c), axis=axis)
def np_stack1(a, b, c):
return np.stack((a, b, c))
def np_stack2(a, b, c, axis):
return np.stack((a, b, c), axis=axis)
def np_hstack(a, b, c):
return np.hstack((a, b, c))
def np_vstack(a, b, c):
return np.vstack((a, b, c))
def np_dstack(a, b, c):
return np.dstack((a, b, c))
def np_column_stack(a, b, c):
return np.column_stack((a, b, c))
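# Shape sketch for the helpers above (illustrative 2-D inputs): for three
# (2, 3) arrays, np.concatenate gives (6, 3) on axis 0, np.stack (3, 2, 3),
# np.hstack (2, 9), np.vstack (6, 3), np.dstack (2, 3, 3) and
# np.column_stack (2, 9).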
class BaseTest(TestCase):
def check_outputs(self, pyfunc, argslist, exact=True):
cfunc = nrtjit(pyfunc)
for args in argslist:
expected = pyfunc(*args)
ret = cfunc(*args)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
if exact:
np.testing.assert_equal(expected, ret)
else:
np.testing.assert_allclose(expected, ret)
class NrtRefCtTest(MemoryLeakMixin):
def assert_array_nrt_refct(self, arr, expect):
self.assertEqual(arr.base.refcount, expect)
class TestDynArray(NrtRefCtTest, TestCase):
def test_empty_0d(self):
@nrtjit
def foo():
arr = np.empty(())
arr[()] = 42
return arr
arr = foo()
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(42, arr)
self.assertEqual(arr.size, 1)
self.assertEqual(arr.shape, ())
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, ())
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_1d(self):
@nrtjit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_2d(self):
def pyfunc(m, n):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
expected_arr = pyfunc(m, n)
got_arr = cfunc(m, n)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_empty_3d(self):
def pyfunc(m, n, p):
arr = np.empty((m, n, p), np.int32)
for i in range(m):
for j in range(n):
for k in range(p):
arr[i, j, k] = i + j + k
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_empty_2d_sliced(self):
def pyfunc(m, n, p):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr[p]
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_return_global_array(self):
y = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(y)
def return_external_array():
return y
cfunc = nrtjit(return_external_array)
out = cfunc()
# out reference by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
np.testing.assert_equal(y, out)
np.testing.assert_equal(y, np.ones(4, dtype=np.float32))
np.testing.assert_equal(out, np.ones(4, dtype=np.float32))
del out
gc.collect()
# out is only referenced by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
del cfunc
gc.collect()
# y is no longer referenced by cfunc
self.assertEqual(initrefct, sys.getrefcount(y))
@tag('important')
def test_return_global_array_sliced(self):
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y[2:]
cfunc = nrtjit(return_external_array)
out = cfunc()
self.assertIsNone(out.base)
yy = y[2:]
np.testing.assert_equal(yy, out)
np.testing.assert_equal(yy, np.ones(2, dtype=np.float32))
np.testing.assert_equal(out, np.ones(2, dtype=np.float32))
def test_array_pass_through(self):
def pyfunc(y):
return y
arr = np.ones(4, dtype=np.float32)
cfunc = nrtjit(pyfunc)
expected = cfunc(arr)
got = pyfunc(arr)
np.testing.assert_equal(expected, arr)
np.testing.assert_equal(expected, got)
self.assertIs(expected, arr)
self.assertIs(expected, got)
@tag('important')
def test_array_pass_through_sliced(self):
def pyfunc(y):
return y[y.size // 2:]
arr = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(arr)
cfunc = nrtjit(pyfunc)
got = cfunc(arr)
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
expected = pyfunc(arr)
self.assertEqual(initrefct + 2, sys.getrefcount(arr))
        np.testing.assert_equal(expected, arr[arr.size // 2:])
np.testing.assert_equal(expected, got)
del expected
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
del got
self.assertEqual(initrefct, sys.getrefcount(arr))
def test_ufunc_with_allocated_output(self):
def pyfunc(a, b):
out = np.empty(a.shape)
np.add(a, b, out)
return out
cfunc = nrtjit(pyfunc)
# 1D case
arr_a = np.random.random(10)
arr_b = np.random.random(10)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 2D case
arr_a = np.random.random(10).reshape(2, 5)
arr_b = np.random.random(10).reshape(2, 5)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 3D case
arr_a = np.random.random(70).reshape(2, 5, 7)
arr_b = np.random.random(70).reshape(2, 5, 7)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
def test_allocation_mt(self):
"""
This test exercises the array allocation in multithreaded usecase.
This stress the freelist inside NRT.
"""
def pyfunc(inp):
out = np.empty(inp.size)
# Zero fill
for i in range(out.size):
out[i] = 0
for i in range(inp[0]):
# Allocate inside a loop
tmp = np.empty(inp.size)
# Write to tmp
for j in range(tmp.size):
tmp[j] = inp[j]
# out = tmp + i
for j in range(tmp.size):
out[j] += tmp[j] + i
return out
cfunc = nrtjit(pyfunc)
size = 10 # small array size so that the computation is short
arr = np.random.randint(1, 10, size)
frozen_arr = arr.copy()
np.testing.assert_equal(pyfunc(arr), cfunc(arr))
# Ensure we did not modify the input
np.testing.assert_equal(frozen_arr, arr)
workers = []
inputs = []
outputs = []
# Make wrapper to store the output
def wrapped(inp, out):
out[:] = cfunc(inp)
# Create a lot of worker threads to create contention
for i in range(100):
arr = np.random.randint(1, 10, size)
out = np.empty_like(arr)
thread = threading.Thread(target=wrapped,
args=(arr, out),
name="worker{0}".format(i))
workers.append(thread)
inputs.append(arr)
outputs.append(out)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for inp, out in zip(inputs, outputs):
np.testing.assert_equal(pyfunc(inp), out)
def test_refct_mt(self):
"""
This test exercises the refct in multithreaded code
"""
def pyfunc(n, inp):
out = np.empty(inp.size)
for i in range(out.size):
out[i] = inp[i] + 1
# Use swap to trigger many refct ops
for i in range(n):
out, inp = inp, out
return out
cfunc = nrtjit(pyfunc)
size = 10
        input = np.arange(size, dtype=np.float64)
expected_refct = sys.getrefcount(input)
swapct = random.randrange(1000)
expected = pyfunc(swapct, input)
np.testing.assert_equal(expected, cfunc(swapct, input))
# The following checks can discover a reference count error
del expected
self.assertEqual(expected_refct, sys.getrefcount(input))
workers = []
outputs = []
swapcts = []
# Make wrapper to store the output
def wrapped(n, input, out):
out[:] = cfunc(n, input)
# Create worker threads
for i in range(100):
out = np.empty(size)
# All threads share the same input
swapct = random.randrange(1000)
thread = threading.Thread(target=wrapped,
args=(swapct, input, out),
name="worker{0}".format(i))
workers.append(thread)
outputs.append(out)
swapcts.append(swapct)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for swapct, out in zip(swapcts, outputs):
np.testing.assert_equal(pyfunc(swapct, input), out)
del outputs, workers
# The following checks can discover a reference count error
self.assertEqual(expected_refct, sys.getrefcount(input))
def test_swap(self):
def pyfunc(x, y, t):
"""Swap array x and y for t number of times
"""
for i in range(t):
x, y = y, x
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(100)
y = np.random.random(100)
t = 100
initrefct = sys.getrefcount(x), sys.getrefcount(y)
expect, got = pyfunc(x, y, t), cfunc(x, y, t)
self.assertIsNone(got[0].base)
self.assertIsNone(got[1].base)
np.testing.assert_equal(expect, got)
del expect, got
self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y)))
def test_return_tuple_of_array(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
initrefct = sys.getrefcount(x)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
self.assertIs(x, expected_x)
self.assertIs(x, got_x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
del expected_x, got_x
self.assertEqual(initrefct, sys.getrefcount(x))
self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y))
def test_return_tuple_of_array_created(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
out = y, y
return out
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
# getrefcount owns 1, got_x owns 1
self.assertEqual(2, sys.getrefcount(got_x))
# getrefcount owns 1, got_y owns 1
self.assertEqual(2, sys.getrefcount(got_y))
def test_issue_with_return_leak(self):
"""
The dispatcher returns a new reference.
We need to work around that for now.
"""
@nrtjit
def inner(out):
return out
def pyfunc(x):
return inner(x)
cfunc = nrtjit(pyfunc)
arr = np.arange(10)
old_refct = sys.getrefcount(arr)
self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(cfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(arr))
class ConstructorBaseTest(NrtRefCtTest):
def check_0d(self, pyfunc):
cfunc = nrtjit(pyfunc)
expected = pyfunc()
ret = cfunc()
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
def check_1d(self, pyfunc):
cfunc = nrtjit(pyfunc)
n = 3
expected = pyfunc(n)
ret = cfunc(n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(-1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_2d(self, pyfunc):
cfunc = nrtjit(pyfunc)
m, n = 2, 3
expected = pyfunc(m, n)
ret = cfunc(m, n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(2, -1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_alloc_size(self, pyfunc):
"""Checks that pyfunc will error, not segfaulting due to array size."""
cfunc = nrtjit(pyfunc)
with self.assertRaises(ValueError) as e:
cfunc()
self.assertIn(
"array is too big",
str(e.exception)
)
class TestNdZeros(ConstructorBaseTest, TestCase):
def setUp(self):
super(TestNdZeros, self).setUp()
self.pyfunc = np.zeros
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
pyfunc = self.pyfunc
def func():
return pyfunc(())
self.check_0d(func)
def test_1d(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n)
self.check_1d(func)
def test_1d_dtype(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n, np.int32)
self.check_1d(func)
def test_1d_dtype_instance(self):
# dtype as numpy dtype, not as scalar class
pyfunc = self.pyfunc
_dtype = np.dtype('int32')
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def test_2d(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n))
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
pyfunc = self.pyfunc
def func1(m, n):
return pyfunc((np.int16(m), np.int32(n)))
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return pyfunc((np.int64(m), np.int8(n)))
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: pyfunc((m, n)))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
@tag('important')
def test_2d_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n), dtype=np.complex64)
self.check_2d(func)
def test_alloc_size(self):
pyfunc = self.pyfunc
width = types.intp.bitwidth
def gen_func(shape, dtype):
return lambda : pyfunc(shape, dtype)
# Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, np.intp))
self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp))
class TestNdOnes(TestNdZeros):
def setUp(self):
super(TestNdOnes, self).setUp()
self.pyfunc = np.ones
@unittest.skipIf(numpy_version < (1, 8), "test requires Numpy 1.8 or later")
class TestNdFull(ConstructorBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
def func():
return np.full((), 4.5)
self.check_0d(func)
def test_1d(self):
def func(n):
return np.full(n, 4.5)
self.check_1d(func)
def test_1d_dtype(self):
def func(n):
return np.full(n, 4.5, np.bool_)
self.check_1d(func)
def test_1d_dtype_instance(self):
dtype = np.dtype('bool')
def func(n):
return np.full(n, 4.5, dtype)
self.check_1d(func)
def test_2d(self):
def func(m, n):
return np.full((m, n), 4.5)
self.check_2d(func)
def test_2d_dtype_kwarg(self):
def func(m, n):
return np.full((m, n), 1 + 4.5j, dtype=np.complex64)
self.check_2d(func)
def test_2d_dtype_from_type(self):
# tests issue #2862
def func(m, n):
return np.full((m, n), np.int32(1))
self.check_2d(func)
# tests meta issues from #2862, that np < 1.12 always
# returns float64. Complex uses `.real`, imaginary part dropped
def func(m, n):
return np.full((m, n), np.complex128(1))
self.check_2d(func)
# and that if a dtype is specified, this influences the return type
def func(m, n):
return np.full((m, n), 1, dtype=np.int8)
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
def func1(m, n):
return np.full((np.int16(m), np.int32(n)), 4.5)
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return np.full((np.int64(m), np.int8(n)), 4.5)
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: np.full((m, n), 4.5))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
def test_alloc_size(self):
width = types.intp.bitwidth
def gen_func(shape, value):
return lambda : np.full(shape, value)
# Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, 1))
self.check_alloc_size(gen_func((1 << width - 8, 64), 1))
class ConstructorLikeBaseTest(object):
def mutate_array(self, arr):
try:
arr.fill(42)
except (TypeError, ValueError):
# Try something else (e.g. Numpy 1.6 with structured dtypes)
fill_value = b'x' * arr.dtype.itemsize
arr.fill(fill_value)
def check_like(self, pyfunc, dtype):
def check_arr(arr):
expected = pyfunc(arr)
ret = cfunc(arr)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
self.check_result_value(ret, expected)
# test writability
self.mutate_array(ret)
self.mutate_array(expected)
np.testing.assert_equal(ret, expected)
orig = np.linspace(0, 5, 6).astype(dtype)
cfunc = nrtjit(pyfunc)
for shape in (6, (2, 3), (1, 2, 3), (3, 1, 2), ()):
if shape == ():
arr = orig[-1:].reshape(())
else:
arr = orig.reshape(shape)
check_arr(arr)
# Non-contiguous array
if arr.ndim > 0:
check_arr(arr[::2])
# Check new array doesn't inherit readonly flag
arr.flags['WRITEABLE'] = False
# verify read-only
with self.assertRaises(ValueError):
arr[0] = 1
check_arr(arr)
# Scalar argument => should produce a 0-d array
check_arr(orig[0])
class TestNdEmptyLike(ConstructorLikeBaseTest, TestCase):
def setUp(self):
super(TestNdEmptyLike, self).setUp()
self.pyfunc = np.empty_like
def check_result_value(self, ret, expected):
pass
def test_like(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, np.float64)
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, dtype)
def test_like_dtype(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, np.int32)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('int32')
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype=np.int32)
self.check_like(func, np.float64)
class TestNdZerosLike(TestNdEmptyLike):
def setUp(self):
super(TestNdZerosLike, self).setUp()
self.pyfunc = np.zeros_like
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like_structured(self):
super(TestNdZerosLike, self).test_like_structured()
def test_like_dtype_structured(self):
super(TestNdZerosLike, self).test_like_dtype_structured()
class TestNdOnesLike(TestNdZerosLike):
def setUp(self):
super(TestNdOnesLike, self).setUp()
self.pyfunc = np.ones_like
self.expected_value = 1
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
super(TestNdOnesLike, self).test_like_structured()
@unittest.expectedFailure
def test_like_dtype_structured(self):
super(TestNdOnesLike, self).test_like_dtype_structured()
@unittest.skipIf(numpy_version < (1, 8), "test requires Numpy 1.8 or later")
class TestNdFullLike(ConstructorLikeBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like(self):
def func(arr):
return np.full_like(arr, 3.5)
self.check_like(func, np.float64)
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
def func(arr):
return np.full_like(arr, 4.5)
self.check_like(func, dtype)
def test_like_dtype(self):
def func(arr):
return np.full_like(arr, 4.5, np.bool_)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('bool')
def func(arr):
return np.full_like(arr, 4.5, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
def func(arr):
return np.full_like(arr, 4.5, dtype=np.bool_)
self.check_like(func, np.float64)
class TestNdIdentity(BaseTest):
def check_identity(self, pyfunc):
self.check_outputs(pyfunc, [(3,)])
def test_identity(self):
def func(n):
return np.identity(n)
self.check_identity(func)
def test_identity_dtype(self):
for dtype in (np.complex64, np.int16, np.bool_, np.dtype('bool')):
def func(n):
return np.identity(n, dtype)
self.check_identity(func)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lift metrics."""
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.addons.fairness.metrics import lift
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.proto import config_pb2
class LiftTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
def _assert_test(self,
num_buckets,
baseline_examples,
comparison_examples,
lift_metric_value,
ignore_out_of_bound_examples=False):
eval_config = config_pb2.EvalConfig(
cross_slicing_specs=[config_pb2.CrossSlicingSpec()])
computations = lift.Lift(
num_buckets=num_buckets,
ignore_out_of_bound_examples=ignore_out_of_bound_examples).computations(
eval_config=eval_config)
histogram = computations[0]
lift_metrics = computations[1]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
baseline_result = (
pipeline
| 'CreateB' >> beam.Create(baseline_examples)
| 'ProcessB' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSliceB' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogramB' >> beam.CombinePerKey(histogram.combiner)
) # pyformat: ignore
comparison_result = (
pipeline
| 'CreateC' >> beam.Create(comparison_examples)
| 'ProcessC' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSliceC' >> beam.Map(lambda x: (('slice'), x))
| 'ComputeHistogramC' >> beam.CombinePerKey(histogram.combiner)
) # pyformat: ignore
# pylint: enable=no-value-for-parameter
merged_result = ((baseline_result, comparison_result)
| 'MergePCollections' >> beam.Flatten())
def check_result(got):
try:
self.assertLen(got, 2)
slice_1, metric_1 = got[0]
slice_2, metric_2 = got[1]
lift_value = None
if not slice_1:
lift_value = lift_metrics.cross_slice_comparison(metric_1, metric_2)
else:
lift_value = lift_metrics.cross_slice_comparison(metric_2, metric_1)
self.assertDictElementsAlmostEqual(
lift_value, {
metric_types.MetricKey(name=f'lift@{num_buckets}'):
lift_metric_value,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(merged_result, check_result, label='result')
def testLift_continuousLabelsAndPredictions(self):
baseline_examples = [{
'labels': np.array([0.0]),
'predictions': np.array([0.1]),
'example_weights': np.array([3.0]),
}, {
'labels': np.array([0.3]),
'predictions': np.array([0.5]),
'example_weights': np.array([5.0]),
}, {
'labels': np.array([0.6]),
'predictions': np.array([0.8]),
'example_weights': np.array([2.0]),
}, {
'labels': np.array([0.9]),
'predictions': np.array([0.3]),
'example_weights': np.array([8.0]),
}, {
'labels': np.array([0.9]),
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import soundpy as sp
import numpy as np
import pytest
test_dir = 'test_audio/'
test_audiofile = '{}audio2channels.wav'.format(test_dir)
test_traffic = '{}traffic.wav'.format(test_dir)
test_python = '{}python.wav'.format(test_dir)
test_horn = '{}car_horn.wav'.format(test_dir)
def test_separate_dependent_var_2d():
data = np.array(range(18)).reshape(-1,1)
with pytest.raises(ValueError):
X, y = sp.feats.separate_dependent_var(data)
def test_separate_dependent_var_3d():
data = np.array(range(12)).reshape(2,2,3)
X, y = sp.feats.separate_dependent_var(data)
expected1 = np.array([[[ 0, 1],[ 3, 4]],[[ 6, 7],[ 9, 10]]])
expected2 = np.array([2, 8])
assert np.array_equal(expected1, X)
assert np.array_equal(expected2, y)
def test_separate_dependent_var_3d_1feature_valueerror():
data = np.array(range(12)).reshape(2,6,1)
with pytest.raises(ValueError):
X, y = sp.feats.separate_dependent_var(data)
def test_separate_dependent_var_3d_2feats():
data = np.array(range(12)).reshape(2,3,2)
X, y = sp.feats.separate_dependent_var(data)
expected1 = np.array([[[ 0],[ 2],[ 4]],[[ 6],[ 8],[10]]])
expected2 = np.array([1, 7])
assert np.array_equal(expected1, X)
assert np.array_equal(expected2, y)
import time
from .base import ControllerBase
from .myqt import QT
from spikeinterface.widgets.utils import get_unit_colors
from spikeinterface.toolkit import (get_template_extremum_channel, get_template_channel_sparsity,
compute_correlograms, localize_units, compute_num_spikes, WaveformPrincipalComponent,
compute_template_similarity)
import numpy as np
spike_dtype =[('sample_index', 'int64'), ('unit_index', 'int64'),
('channel_index', 'int64'), ('segment_index', 'int64'),
('visible', 'bool'), ('selected', 'bool'), ('included_in_pc', 'bool')]
_MAX_SPIKE_PER_UNIT_WARNING = 5000
class SpikeinterfaceController(ControllerBase):
def __init__(self, waveform_extractor=None, parent=None, verbose=False):
ControllerBase.__init__(self, parent=parent)
self.we = waveform_extractor
max_spikes_per_unit = self.we._params['max_spikes_per_unit']
if max_spikes_per_unit > _MAX_SPIKE_PER_UNIT_WARNING:
print(f'You have {max_spikes_per_unit} spikes per unit in your WaveformExtractor; the display can be slow')
print('You should re-run the WaveformExtractor with max_spikes_per_unit=500')
if waveform_extractor.is_extension('principal_components'):
self.pc = waveform_extractor.load_extension('principal_components')
else:
self.pc = None
if waveform_extractor.is_extension('quality_metrics'):
qmc = waveform_extractor.load_extension('quality_metrics')
self.metrics = qmc._metrics
else:
self.metrics = None
if waveform_extractor.is_extension('spike_amplitudes'):
sac = waveform_extractor.load_extension('spike_amplitudes')
self.spike_amplitudes = sac.get_amplitudes(outputs='by_unit')
else:
self.spike_amplitudes = None
# some direct attribute
self.num_segments = self.we.recording.get_num_segments()
self.sampling_frequency = self.we.recording.get_sampling_frequency()
self.colors = get_unit_colors(self.we.sorting)
self.qcolors = {}
for unit_id, color in self.colors.items():
r, g, b, a = color
self.qcolors[unit_id] = QT.QColor(int(r*255), int(g*255), int(b*255))
self.unit_visible_dict = {unit_id:False for unit_id in self.unit_ids}
self.unit_visible_dict[self.unit_ids[0]] = True
if verbose:
t0 = time.perf_counter()
print('Gather all spikes')
all_spikes = self.we.sorting.get_all_spike_trains(outputs='unit_index')
num_spikes = np.sum([e[0].size for e in all_spikes])
# make internal spike vector
self.spikes = np.zeros(num_spikes, dtype=spike_dtype)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import OPTICS
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts
from itertools import combinations, chain
import warnings
class OpticsPairs:
"""
This class implements the pairs selection framework outlined in
<NAME> and <NAME>'s publication:
Enhancing a Pairs Trading strategy with the application
of Machine Learning [1].
<http://premio-vidigal.inesc.pt/pdf/SimaoSarmentoMSc-resumo.pdf>`_
Their work is motivated by the need to find "profitable pairs while
constraining the search space" [1]. To achieve this, security returns
are first reduced via principal component analysis. Next the securities are
paired through clustering via the OPTICS algorithim introduced by
Ankerst et. al in their publication: OPTICS: Ordering Points To Identify
the Clustering Structure [2]
<https://www.dbs.ifi.lmu.de/Publikationen/Papers/OPTICS.pdf>`_
Finally, the pairs are filtered by criteria including: the Engle-Granger
test, analysis of the Hurst exponent, half-life filtering, and practical
implementation requirements.
"""
def __init__(self, data: pd.DataFrame):
"""
Initializes OpticsPairs object and calculates one-period returns of
securities.
:param data: pd.DataFrame containing time series prices of various
assets. Dimensions of dataframe should be TxN.
"""
self.prices = data
self.securities = self.prices.columns
self.returns = self.prices.pct_change()[1:]
self.returns_reduced = None # Reduced transform of returns from PCA
self.components_ = None # Components generated from PCA
self.n_components_ = None # Number of components of PCA
self.explained_variance_ratio_ = None # Variance explained by PCA
self.pairs = None # Potential pairs found from OPTICS clusters
self.engle_granger_tests = None # pvalue Engle-Granger cointegration
self.norm_spreads = None # Z-score of spreads generated from pairs
self.hurst_exponents = None # Hurst exponent from normalized spreads
self.half_lives = None # Half-life of normalized spreads
self.avg_cross_count = None # Annualized average count of spread mean-crossings
self.pairs_df = None # Dataframe of summary stats and potential pairs
self.filtered_pairs = None # Filtered pairs_df
self.cluster_labels = None # Array of cluster labels for securities
def reduce_PCA(self,
n_components_: int = 10,
Scaler=StandardScaler(),
random_state: int = 42):
"""
Reduces self.returns to dimensions equal to n_components_ through
principal component analysis. Returns are first scaled via the Scaler
parameter. Then calculate correlation matrix of scaled returns.
Finally, principal component analysis is used to reduce dimensions.
:param n_components_: An integer to denote number of dimensions
for pca. Authors recommend n_components_ <= 15 [1].
:param Scaler: A transformer to scale input data. Scaled data is
recommended for principal component analysis.
:param random_state: An integer to denote the seed for PCA() to ensure
reproducibility.
"""
if self.returns is None:
raise ValueError("returns not found: input price dataframe \
into OpticsPairs instance")
if n_components_ > int(15):
warnings.warn("Maximum n_components_ recommended is 15")
# PCA pipeline
pipe = Pipeline([
# Normalize raw data via user input scaler
('scaler', Scaler),
# Perform PCA on scaled returns
('pca', PCA(n_components=n_components_, random_state=random_state))
])
self.returns_reduced = pipe.fit_transform(self.returns)
self.components_ = pipe['pca'].components_
self.n_components_ = pipe['pca'].n_components_
self.explained_variance_ratio_ = pipe['pca'].explained_variance_ratio_
def find_pairs(self):
"""
Uses the OPTICS algorithm to find clusters of similar securities within
PCA component space. Once cluster labels are assigned, the function
generates a series of tuples containing unique pairs of securities
within the same cluster.
"""
if self.returns_reduced is None:
raise ValueError("returns_reduced not found: must run \
.reduce_PCA() before this function")
# Initialize and fit OPTICS cluster to PCA components
clustering = OPTICS()
clustering.fit(self.components_.T)
# Create cluster data frame and identify trading pairs
clusters = pd.DataFrame({'security': self.securities,
'cluster': clustering.labels_})
# Clusters with label == -1 are 'noise'
# From OPTICS sk-learn documentation: Noisy samples and points
# which are not included in a leaf cluster of cluster_hierarchy_
# are labeled as -1
clusters = clusters[clusters['cluster'] != -1]
# Group securities by cluster and flatten list of combination lists
groups = clusters.groupby('cluster')
combos = list(groups['security'].apply(combinations, 2)) # All pairs
pairs = list(chain.from_iterable(combos)) # Flatten list of lists
print(f"Found {len(pairs)} potential pairs")
self.pairs = pd.Series(pairs)
self.cluster_labels = clustering.labels_
def calc_eg_norm_spreads(self):
"""
Calculates the p-value of the t-stat from the Engle-Granger
cointegration test. Calculates normalized beta-adjusted spread
series of potential pairs.
"""
if self.prices is None:
raise ValueError("prices not found: must initialize with \
price dataframe before this function")
if self.pairs is None:
raise ValueError("pairs not found: must run .find_pairs() \
before this function")
engle_granger_tests = []
norm_spreads = []
# Test each pair for cointegration
for pair in self.pairs:
security_0 = self.prices[pair[0]]
security_1 = self.prices[pair[1]]
# Get independent and dependent variables
# for OLS calculation and corresponding
# pvalue for Engle-Granger tests
security_0.fillna(0, inplace=True)
security_1.fillna(0, inplace=True)
pvalue, x_, y_ = OpticsPairs.get_ols_variables(security_0, security_1)
engle_granger_tests.append(pvalue)
x = np.array(x_, dtype=float)
y = np.array(y_, dtype=float)
# Get parameters and calculate spread
model = sm.OLS(y, x)
result = model.fit()
alpha, beta = result.params[0], result.params[1]
spread = y - (alpha + beta*x.T[1])
norm_spread = OpticsPairs.calc_zscore(spread)
norm_spreads.append(norm_spread)
# Convert spreads from list to dataframe
norm_spreads = pd.DataFrame(np.transpose(norm_spreads),
index=self.prices.index)
self.alpha = alpha
self.beta = beta
self.norm_spreads = norm_spreads
self.engle_granger_tests = pd.Series(engle_granger_tests)
@staticmethod
def get_ols_variables(security_0: str,
security_1: str):
"""
Compares t-stats of two Engle-Granger cointegration tests.
Returns independent and dependent variables for OLS.
:param security_0: Price series of the first security.
:param security_1: Price series of the second security.
"""
test_0 = ts.coint(security_0, security_1)
test_1 = ts.coint(security_1, security_0)
t_stat_0, pvalue_0 = test_0[0], test_0[1]
t_stat_1, pvalue_1 = test_1[0], test_1[1]
# Avoid reliance on dependent variable and choose smallest t-stat
# for Engle-Granger Test
# Use corresponding independent and dependent variables to
# calculate spread
if abs(t_stat_0) < abs(t_stat_1):
pvalue = pvalue_0
x = sm.add_constant(np.asarray(security_1))
y = np.asarray(security_0)
else:
pvalue = pvalue_1
x = sm.add_constant(np.asarray(security_0))
y = np.asarray(security_1)
return pvalue, x, y
def calc_hurst_exponents(self):
"""
Calculates Hurst exponent of each potential pair's normalized spread.
"""
if self.norm_spreads is None:
raise ValueError("norm_spreads not found: must run \
.calc_eg_norm_spreads before this function")
hurst_exponents = []
# Calculate Hurst exponents and generate series
for col in self.norm_spreads.columns:
hurst_exp = OpticsPairs.hurst(self.norm_spreads[col].values)
hurst_exponents.append(hurst_exp)
self.hurst_exponents = pd.Series(hurst_exponents)
def calc_half_lives(self):
"""
Calculates half-life of each potential pair's normalized spread.
"""
if self.norm_spreads is None:
raise ValueError("norm_spreads not found: must run \
.calc_eg_norm_spreads before this function")
self.norm_spreads.fillna(0, inplace=True)
self.half_lives = self.norm_spreads.apply(OpticsPairs.half_life)
def calc_avg_cross_count(self, trading_year: float = 252.0):
"""
Calculates the average number of instances per year the
normalized spread of potential pairs crosses the mean.
Authors recommend trading pairs that cross mean on average
12 times per year [1].
"""
if self.prices is None:
raise ValueError("prices not found: must initialize with \
price dataframe before this function")
if self.norm_spreads is None:
raise ValueError("norm_spreads not found: must run \
.calc_eg_norm_spreads() before this function")
# Find number of years
n_days = len(self.prices)
n_years = n_days/trading_year
# Find annual average cross count
cross_count = self.norm_spreads.apply(OpticsPairs.count_crosses)
self.avg_cross_count = cross_count/n_years
def filter_pairs(self,
max_pvalue: float = 0.05,
max_hurst_exp: float = 0.5,
max_half_life: float = 252.0,
min_half_life: float = 1.0,
min_avg_cross: float = 12.0):
"""
Generates a summary dataframe of potential pairs containing:
1. Engle-Granger p-value
2. Hurst exponent
3. Half-life
4. Average Cross Count
Filters summary dataframe to include pairs that meet user
specified criteria.
:param max_pvalue: A floating number to eliminate potential pairs with
Engle-Granger t-stat pvalues above max_pvalue. Default set to 5%.
:param max_hurst_exp: A floating number to eliminate potential
pairs with Hurst exponents greater than max_hurst_exp.
Values below 0.5 represent mean-reverting pairs.
Default set to 0.5.
:param max_half_life: A floating number to eliminate potential pairs
with half-lives above user defined value.
Default value set to 252.0.
:param min_half_life: A floating number to eliminate potential
pairs with half-lives below user defined value.
Default value set to 1.0.
:param min_avg_cross: A floating number to eliminate potential pairs with
average cross count less than user defined value.
Default value set to 12.0
"""
required = [self.prices,
self.engle_granger_tests,
self.hurst_exponents,
self.half_lives,
self.avg_cross_count]
for i in required:
if i is None:
raise ValueError("Required: \n 1. prices \n 2. \
engle_granger_tests \n 3. hurst_exponents \
\n 4. half_lives \n 5. avg_cross_count")
# Generate summary dataframe of potential trading pairs
pairs_df = pd.concat([self.pairs,
self.engle_granger_tests,
self.hurst_exponents,
self.half_lives,
self.avg_cross_count],
axis=1)
pairs_df.columns = ['pair',
'pvalue',
'hurst_exp',
'half_life',
'avg_cross_count']
# Find pairs that meet user defined criteria
filtered_pairs = pairs_df.loc[
# Significant Engle-Grange test AND
(pairs_df['pvalue'] <= max_pvalue) &
# Mean reverting according to Hurst exponent AND
(pairs_df['hurst_exp'] < max_hurst_exp) &
# Half-life above minimum value AND
# Half-life below maximum value AND
((pairs_df['half_life'] >= min_half_life) &
(pairs_df['half_life'] <= max_half_life)) &
# Produces sufficient number of trading opportunities
(pairs_df['avg_cross_count'] >= min_avg_cross)]
self.pairs_df = pairs_df
self.filtered_pairs = filtered_pairs
if len(self.filtered_pairs) == 0:
print("No tradable pairs found. Try relaxing criteria.")
else:
n_pairs = len(self.filtered_pairs)
print(f"Found {n_pairs} tradable pairs!")
def plot_pair_price_spread(self, idx: int):
"""
Plots the price path of both securities in selected pair,
with dual axis. Plots the normalized spread of the price paths.
"""
required = [self.prices,
self.pairs,
self.norm_spreads,
self.half_lives,
self.avg_cross_count]
for i in required:
if i is None:
raise ValueError("Required: \n 1. prices \n 2. pairs \
\n 3. norm_spreads")
fontsize = 20
securities = self.pairs[idx]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(20, 10))
# first security (left axis)
security = securities[0]
color = 'tab:blue'
axs[0].plot(self.prices[security], color=color)
axs[0].set_ylabel(security, color=color, fontsize=fontsize)
axs[0].tick_params(axis='y', labelcolor=color)
axs[0].set_title('pair_'+str(idx)+' prices', fontsize=fontsize)
# second security (right axis)
security = securities[1]
color = 'tab:orange'
axs2 = axs[0].twinx()
axs2.plot(self.prices[security], color=color)
axs2.set_ylabel(security, color=color, fontsize=fontsize)
axs2.tick_params(axis='y', labelcolor=color)
# plot spread
axs[1].plot(self.norm_spreads[idx], color='black')
axs[1].set_ylabel('spread_z_score', fontsize=fontsize, color='white')
axs[1].set_xlabel('date', fontsize=fontsize, color='white')
axs[1].set_title('pair_'+str(idx)+' normalized spread',
fontsize=fontsize, color='white')
axs[1].axhline(0, color='blue', ls='--')
axs[1].axhline(1, color='r', ls='--')
axs[1].axhline(-1, color='r', ls='--')
fig.tight_layout()
def plot_explained_variance(self):
"""
Plots the cumulative variance explained by principal component
analysis.
"""
if self.explained_variance_ratio_ is None:
raise ValueError("explained_variance_ratio_ missing: run \
.reduce_PCA() before this function")
fig, axs = plt.subplots()
axs.set_title('PCA Cumulative Explained Variance')
axs.plot(np.cumsum(self.explained_variance_ratio_))
axs.set_xlabel('number of components')
axs.set_ylabel('explained variance')
fig.tight_layout()
def plot_loadings(self, n: int = 5):
"""
Plots up to 5 bar charts depicting the loadings of
each component, by security.
"""
if self.components_ is None:
raise ValueError("components_ missing: run \
.reduce_PCA() before this function")
n_loadings = min(n, self.n_components_)
fig, axs = plt.subplots(n_loadings, 1, sharex=True, figsize=(20, 20))
fontsize = 18
for i in range(n_loadings):
axs[i].bar([i for i in range(self.components_.shape[1])],
self.components_[i])
axs[i].set_ylabel('component_'+str(i)+' loading',
fontsize=fontsize)
axs[0].set_title('PCA Loadings', fontsize=fontsize)
axs[i].set_xlabel('security_observation', fontsize=fontsize)
fig.tight_layout()
def plot_clusters(self, n_dimensions: int = 2):
"""
Plots a 2-dimension or 3-dimension scatter plot of security principal
component loadings. Plots either the first two or three
principal components and colors securities according to their
cluster label found from OPTICS algorithm.
:param n_dimensions: An integer to denote how many dimensions to plot.
Default value is two.
"""
for i in [self.n_components_, self.components_, self.cluster_labels]:
if i is None:
raise ValueError("Required: \n 1. n_components \n 2. \
reduced_returns")
fontsize = 15
figsize = (10, 10)
if n_dimensions == 2:
fig, axs = plt.subplots(1, 1, figsize=figsize)
axs.scatter(self.components_[0].T[self.cluster_labels != -1],
self.components_[1].T[self.cluster_labels != -1],
c=self.cluster_labels[self.cluster_labels != -1])
axs.scatter(self.components_[0].T[self.cluster_labels == -1],
self.components_[1].T[self.cluster_labels == -1],
c=self.cluster_labels[self.cluster_labels == -1],
alpha=0.1)
axs.set_title('OPTICS Clusters', fontsize=fontsize)
axs.set_xlabel('component_0 loading', fontsize=fontsize)
axs.set_ylabel('component_1 loading', fontsize=fontsize)
fig.tight_layout()
elif n_dimensions == 3:
fig = plt.figure(figsize=figsize)
axs = fig.add_subplot(111, projection='3d')
axs.scatter(self.components_[0].T[self.cluster_labels != -1],
self.components_[1].T[self.cluster_labels != -1],
self.components_[2].T[self.cluster_labels != -1],
c=self.cluster_labels[self.cluster_labels != -1])
axs.scatter(self.components_[0].T[self.cluster_labels == -1],
self.components_[1].T[self.cluster_labels == -1],
self.components_[2].T[self.cluster_labels == -1],
c=self.cluster_labels[self.cluster_labels == -1],
alpha=0.1)
axs.set_title('OPTICS Clusters', fontsize=fontsize)
axs.set_xlabel('component_0 loading', fontsize=fontsize)
axs.set_ylabel('component_1 loading', fontsize=fontsize)
axs.set_zlabel('component_2 loading', fontsize=fontsize)
fig.tight_layout()
else:
warnings.warn("Cannot visualize more than three dimensions!")
@staticmethod
def hurst(norm_spread):
"""
Calculates Hurst exponent.
https://en.wikipedia.org/wiki/Hurst_exponent
:param norm_spread: An array like object used to calculate half-life.
"""
# Create the range of lag values
lags = range(2, 100)
# Calculate the array of the variances of the lagged differences
diffs = [np.subtract(norm_spread[l:], norm_spread[:-l]) for l in lags]
tau = [np.sqrt(np.std(diff)) for diff in diffs]
# Use a linear fit to estimate the Hurst Exponent
poly = np.polyfit(np.log(lags), np.log(tau), 1)
# tau scales as lag^(H/2) here (tau is the sqrt of the std), so H is twice the slope
return poly[0] * 2.0
"""
Module for parsing intermediate data from Hipparcos and Gaia.
For Hipparcos (both reductions) and Gaia, the scan angle theta is the angle between the north
equatorial pole (declination) and the along-scan axis, defined as positive if east of the north pole
(positive for increasing RA).
Author:
<NAME>
<NAME>
"""
import numpy as np
import pandas as pd
from scipy import stats, special
import warnings
from ast import literal_eval
import os
import re
import glob
import itertools
from math import ceil, floor
import pkg_resources
from astropy.time import Time
from astropy.table import QTable, Column, Table
from htof import settings as st
from htof.utils.data_utils import merge_consortia, safe_concatenate
from htof.utils.parse_utils import gaia_obmt_to_tcb_julian_year
import abc
class DataParser(object):
"""
Base class for parsing Hip1, Hip2 and Gaia data. self.epoch, self.covariance_matrix and self.scan_angle are saved
as pandas.DataFrame. Use .values (e.g. self.epoch.values) to access the ndarray version.
"""
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
along_scan_errs=None, parallax_factors=None, meta=None):
if meta is None:
meta = {}
self.scan_angle = pd.Series(scan_angle, dtype=np.float64)
self._epoch = pd.DataFrame(epoch, dtype=np.float64)
self.residuals = pd.Series(residuals, dtype=np.float64)
self.parallax_factors = pd.Series(parallax_factors, dtype=np.float64)
self.along_scan_errs = pd.Series(along_scan_errs, dtype=np.float64)
self.inverse_covariance_matrix = inverse_covariance_matrix
self.meta = meta
@staticmethod
def get_intermediate_data_file_path(star_id: str, intermediate_data_directory: str):
star_id = str(star_id)
filepath = os.path.join(os.path.join(intermediate_data_directory, '**/'), '*' + star_id + '*')
filepath_list = glob.glob(filepath, recursive=True)
if len(filepath_list) != 1:
# search for the star id with leading zeros stripped
filepath = os.path.join(os.path.join(intermediate_data_directory, '**/'), '*' + star_id.lstrip('0') + '*')
filepath_list = glob.glob(filepath, recursive=True)
if len(filepath_list) != 1:
# search for files with the full 6 digit hipparcos string
filepath = os.path.join(os.path.join(intermediate_data_directory, '**/'), '*' + star_id.zfill(6) + '*')
filepath_list = glob.glob(filepath, recursive=True)
if len(filepath_list) != 1:
# take the file whose name contains only the hip id if there are multiple matches
filepath = os.path.join(os.path.join(intermediate_data_directory, '**/'), '*' + star_id.lstrip('0') + '*')
filepath_list = match_filename(glob.glob(filepath, recursive=True), star_id)
if len(filepath_list) == 0:
raise FileNotFoundError('No file with name containing {0} or {1} or {2} found in {3}'
''.format(star_id, star_id.lstrip('0'), star_id.zfill(6), intermediate_data_directory))
if len(filepath_list) > 1:
raise FileNotFoundError('Unable to find the correct file among the {0} files containing {1}'
'found in {2}'.format(len(filepath_list), star_id, intermediate_data_directory))
return filepath_list[0]
@staticmethod
def read_intermediate_data_file(star_id: str, intermediate_data_directory: str, skiprows, header, sep):
iad_filepath = DataParser.get_intermediate_data_file_path(star_id, intermediate_data_directory)
data = pd.read_csv(iad_filepath, sep=sep, skiprows=skiprows, header=header, engine='python')
return data
@abc.abstractmethod
def parse(self, star_id: str, intermediate_data_parent_directory: str, **kwargs):
pass # pragma: no cover
def julian_day_epoch(self):
return self._epoch.values.flatten()
@property
def epoch(self):
return self._epoch.values.flatten()
def calculate_inverse_covariance_matrices(self, cross_scan_along_scan_var_ratio=np.inf):
self.inverse_covariance_matrix = calc_inverse_covariance_matrices(self.scan_angle,
cross_scan_along_scan_var_ratio=cross_scan_along_scan_var_ratio,
along_scan_errs=self.along_scan_errs,
star_id=self.meta.get('star_id', None))
def write(self, path: str, *args, **kwargs):
"""
:param path: str. filepath to write out the processed data.
:param args: arguments for astropy.table.Table.write()
:param kwargs: keyword arguments for astropy.table.Table.write()
:return: None
Note: The IntermediateDataParser.inverse_covariance_matrix matrices are added to the table
as strings so that they are easily writable. Each icov matrix is saved as a string.
Each element of t['icov'] can be recovered with ast.literal_eval(t['icov'][i])
where i is the index. ast.literal_eval(t['icov'][i]) will return a 2x2 list.
"""
t = self.as_table()
# transform icov matrices as writable strings.
t['icov'] = [str(icov.tolist()) for icov in t['icov']]
t.write(path, fast_writer=False, *args, **kwargs)
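# A minimal round-trip sketch (hypothetical filename; `p` is a parsed DataParser
# whose inverse covariance matrices have been computed):
#   p.write('iad.csv', format='ascii')
#   t = Table.read('iad.csv', format='ascii')
#   icov_0 = np.array(literal_eval(t['icov'][0]))  # recovers the first 2x2 matrix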
def as_table(self):
"""
:return: astropy.table.QTable
The IntermediateDataParser object tabulated.
This table has as columns all of the attributes of IntermediateDataParser.
For any attribute which is empty or None, the column will contain zeros.
"""
cols = [self.scan_angle, self.julian_day_epoch(), self.residuals, self.along_scan_errs, self.inverse_covariance_matrix]
cols = [Column(col) for col in cols]
# replacing incorrect length columns with empties.
cols = [col if len(col) == len(self) else Column(None, length=len(self)) for col in cols]
t = QTable(cols, names=['scan_angle', 'julian_day_epoch', 'residuals', 'along_scan_errs', 'icov'])
return t
def __add__(self, other):
all_scan_angles = pd.concat([self.scan_angle, other.scan_angle])
all_epoch = pd.concat([pd.DataFrame(self.julian_day_epoch()), pd.DataFrame(other.julian_day_epoch())])
all_residuals = pd.concat([self.residuals, other.residuals])
all_along_scan_errs = pd.concat([self.along_scan_errs, other.along_scan_errs])
# TODO: add parallax factors. Tricky because gaia missions do not have them.
all_inverse_covariance_matrix = safe_concatenate(self.inverse_covariance_matrix,
other.inverse_covariance_matrix)
return DataParser(scan_angle=all_scan_angles, epoch=all_epoch, residuals=all_residuals,
inverse_covariance_matrix=all_inverse_covariance_matrix,
along_scan_errs=all_along_scan_errs)
def __radd__(self, other):
if other == 0:
return self
return self.__add__(other)
def __len__(self):
return len(self._epoch)
class GaiaData(DataParser):
DEAD_TIME_TABLE_NAME = None
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
min_epoch=-np.inf, max_epoch=np.inf, along_scan_errs=None, meta=None):
super(GaiaData, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals, meta=meta,
inverse_covariance_matrix=inverse_covariance_matrix)
self.min_epoch = min_epoch
self.max_epoch = max_epoch
def parse(self, star_id, intermediate_data_directory, **kwargs):
self.meta['star_id'] = star_id
data = self.read_intermediate_data_file(star_id, intermediate_data_directory,
skiprows=0, header='infer', sep=r'\s*,\s*')
data = self.trim_data(data['ObservationTimeAtBarycentre[BarycentricJulianDateInTCB]'],
data, self.min_epoch, self.max_epoch)
data = self.reject_dead_times(data['ObservationTimeAtBarycentre[BarycentricJulianDateInTCB]'], data)
self._epoch = data['ObservationTimeAtBarycentre[BarycentricJulianDateInTCB]']
self.scan_angle = data['scanAngle[rad]']
def trim_data(self, epochs, data, min_mjd, max_mjd):
valid = np.logical_and(epochs >= min_mjd, epochs <= max_mjd)
return data[valid].dropna()
def reject_dead_times(self, epochs, data):
# there will be different astrometric gaps for gaia DR2 and DR3 because rejection criteria may change.
# hence we have the appropriate parsers have different values for DEAD_TIME_TABLE_NAME.
if self.DEAD_TIME_TABLE_NAME is None:
# return the data if there is no dead time table specified.
return data
dead_time_table = Table.read(self.DEAD_TIME_TABLE_NAME)
# convert on board mission time (OBMT) to julian day
for col, newcol in zip(['start', 'end'], ['start_tcb_jd', 'end_tcb_jd']):
dead_time_table[newcol] = gaia_obmt_to_tcb_julian_year(dead_time_table[col]).jd
# make a mask of the epochs. Those that are within a dead time window have a value of 0 (masked)
valid = np.ones(len(data), dtype=bool)
for entry in dead_time_table:
valid[np.logical_and(epochs >= entry['start_tcb_jd'], epochs <= entry['end_tcb_jd'])] = 0
# reject the epochs which fall within a dead time window
data = data[valid].dropna()
return data
class DecimalYearData(DataParser):
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
along_scan_errs=None, meta=None):
super(DecimalYearData, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals, meta=meta,
inverse_covariance_matrix=inverse_covariance_matrix)
def parse(self, star_id, intermediate_data_parent_directory, **kwargs):
pass # pragma: no cover
def julian_day_epoch(self):
return Time(self._epoch.values.flatten(), format='decimalyear').jd
def calc_inverse_covariance_matrices(scan_angles, cross_scan_along_scan_var_ratio=np.inf,
along_scan_errs=None, star_id=None):
"""
:param scan_angles: pandas.DataFrame.
data frame with scan angles, e.g. as-is from IntermediateDataParser.read_intermediate_data_file.
scan_angles.values is a numpy array with the scan angles
:param cross_scan_along_scan_var_ratio: var_cross_scan / var_along_scan
:param along_scan_errs: array. array of len(scan_angles), the errors in the along scan direction, one for each
scan in scan_angles.
:return: An ndarray with shape (len(scan_angles), 2, 2), i.e. an array of inverse covariance matrices in the same order
as the scan angles
"""
if along_scan_errs is None or len(along_scan_errs) == 0:
along_scan_errs = np.ones_like(scan_angles.values.flatten())
if np.any(np.isclose(along_scan_errs, 0)):
warnings.warn(f'The IAD of {star_id} contained an along scan error that '
'is zero. This is unphysical, the observation should '
'probably have been marked as rejected. '
'In order to compute the inverse covariance matrices for '
'this source we are setting this AL error to a large '
'number (1 arcsec) and continue. ', RuntimeWarning)
along_scan_errs[np.isclose(along_scan_errs, 0)] = 1000
icovariance_matrices = []
icov_matrix_in_scan_basis = np.array([[1, 0],
[0, 1/cross_scan_along_scan_var_ratio]])
for theta, err in zip(scan_angles.values.flatten(), along_scan_errs):
c, s = np.cos(theta), np.sin(theta)
Rot = np.array([[s, -c], [c, s]])
icov_matrix_in_ra_dec_basis = np.matmul(np.matmul(1/(err ** 2) * Rot, icov_matrix_in_scan_basis), Rot.T)
icovariance_matrices.append(icov_matrix_in_ra_dec_basis)
return np.array(icovariance_matrices)
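# Worked sketch of the rotation above (illustrative values): for a single scan with
# theta = pi/2 (along-scan axis pointing east, along RA), a 1 mas along-scan error,
# and an infinite cross-scan/along-scan variance ratio, Rot is the identity and the
# inverse covariance matrix in the (RA, Dec) basis is [[1, 0], [0, 0]]: that scan
# constrains RA but carries no Dec information.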
class HipparcosOriginalData(DecimalYearData):
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
along_scan_errs=None):
super(HipparcosOriginalData, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals,
inverse_covariance_matrix=inverse_covariance_matrix)
def parse(self, star_id, intermediate_data_directory, data_choice='MERGED'):
"""
:param star_id: a string which is just the number for the HIP ID.
:param intermediate_data_directory: the path (string) to the place where the intermediate data is stored, e.g.
Hip2/IntermediateData/resrec
note you have to specify the file resrec or absrec. We use the residual records, so specify resrec.
:param data_choice: 'FAST' or 'NDAC', 'BOTH', or 'MERGED. The standard is 'MERGED' which does a merger
of the 'NDAC' and 'FAST' data reductions in the same way as the hipparcos 1991.25 catalog. 'BOTH' keeps
both consortia's data in the IAD, which would be unphysical and is just for debugging. 'FAST' would keep
only the FAST consortia data, likewise only NDAC would be kept if you selected 'NDAC'.
"""
if (data_choice != 'NDAC') and (data_choice != 'FAST') and (data_choice != 'MERGED')\
and (data_choice != 'BOTH'):
raise ValueError('data choice has to be either NDAC or FAST or MERGED or BOTH.')
self.meta['star_id'] = star_id
data = self.read_intermediate_data_file(star_id, intermediate_data_directory,
skiprows=10, header='infer', sep=r'\s*\|\s*')
data = self._fix_unnamed_column(data)
data = self._select_data(data, data_choice)
# compute scan angles and observation epochs according to van Leeuwen & Evans 1998
# 10.1051/aas:1998218, eq. 11 & 12.
self.scan_angle = np.arctan2(data['IA3'], data['IA4']) # unit radians, arctan2(sin, cos)
# Use the larger denominator when computing the epoch offset.
# This increases numerical precision and avoids NaNs if one of the two fields (IA3, IA4) is exactly zero.
self._epoch = 1991.25 + (data['IA6'] / data['IA3']).where(abs(data['IA3']) > abs(data['IA4']), (data['IA7'] / data['IA4']))
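# e.g. (illustrative numbers): with IA3 = sin(theta) = 0.8, IA4 = cos(theta) = 0.6,
# IA6 = 0.4, IA7 = 0.3, both ratios give an epoch offset of 0.5 yr, but dividing by
# IA3 (the larger-magnitude denominator) is numerically safer.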
self.residuals = data['IA8'] # unit milli-arcseconds (mas)
self.along_scan_errs = data['IA9'] # unit milli-arcseconds
self.parallax_factors = data['IA5']
@staticmethod
def _select_data(data, data_choice):
# restrict intermediate data to either NDAC, FAST, or merge the NDAC and FAST results.
if data_choice == 'MERGED':
data = merge_consortia(data)
elif data_choice != 'BOTH':
data = data[data['IA2'].str.upper() == {'NDAC': 'N', 'FAST': 'F'}[data_choice]]
return data
@staticmethod
def _fix_unnamed_column(data, correct_key='IA2', col_idx=1):
data.rename(columns={data.columns[col_idx]: correct_key}, inplace=True)
return data
class HipparcosRereductionDVDBook(DecimalYearData):
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
along_scan_errs=None, meta=None):
super(HipparcosRereductionDVDBook, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals, meta=meta,
inverse_covariance_matrix=inverse_covariance_matrix)
self._additional_rejected_epochs = {} # epochs that need to be rejected due to the write out bug.
self._rejected_epochs = {} # epochs that are known rejects, e.g.,
# those that have negative AL errors in the java tool
def read_header(self, star_id, intermediate_data_directory):
header = self.read_intermediate_data_file(star_id, intermediate_data_directory,
skiprows=0, header=None, sep=r'\s+')
return header
def parse(self, star_id, intermediate_data_directory, error_inflate=True, header_rows=1,
attempt_adhoc_rejection=True, **kwargs):
"""
:param: star_id:
:param: intermediate_data_directory:
:param: error_inflate: True if the along-scan errors are to be corrected by the inflation factor
according to Appendix B of D. Michalik et al. 2014. Only turn this off for tests, or if the parameters
required to compute the error inflation are unavailable.
:param: header_rows: int.
:return:
Compute scan angles and observation epochs from van Leeuwen 2007, table G.8;
see also Figure 2.1, section 2.5.1, and section 4.1.2.
NOTE: the Hipparcos re-reduction book and the figures therein describe the
scan angle against the north ecliptic pole.
NOTE: In the actual intermediate astrometry data on the DVD the scan angle psi
is given in the equatorial system. This is similar to the original
Hipparcos and Gaia (Source: private communication between <NAME> and <NAME>, April 2019), which define the scan angle theta
as East of the North equatorial pole: theta = pi / 2 - psi;
see Brandt et al. (2021), Section 2.2.2.
"""
self.meta['star_id'] = star_id
header = self.read_header(star_id, intermediate_data_directory)
data = self.read_intermediate_data_file(star_id, intermediate_data_directory,
skiprows=header_rows, header=None, sep=r'\s+')
self.scan_angle = np.arctan2(data[3], data[4]) # data[3] = sin(theta) = cos(psi), data[4] = cos(theta) = sin(psi)
self._epoch = data[1] + 1991.25
self.residuals = data[5] # unit milli-arcseconds (mas)
self.along_scan_errs = data[6] # unit milli-arcseconds (mas)
self.parallax_factors = data[2]
self.meta['catalog_f2'] = header.iloc[0][6]
self.meta['catalog_soltype'] = header.iloc[0][4]
# TODO need to calculate f2 newly using htof. Like we do in the java tool.
n_transits, nparam, percent_rejected = header.iloc[0][2], get_nparam(header.iloc[0][4]), header.iloc[0][7]
if attempt_adhoc_rejection:
warnings.warn(f"For source {self.meta['star_id']}. The DVD IAD does not indicate which observation epochs were "
"rejected for the final solution. htof will attempt to find which epochs to "
"reject in order to reproduce the catalog parameters. However, if this source "
"also has some corrupted residuals (see Brandt et al. 2021, Section 4), then "
"this will fail. We recommend you switch to using the IAD from the Java tool, "
"since that version of the IAD indicates rejected epochs with negative "
"uncertainties.", UserWarning)
self.rejected_epochs = find_epochs_to_reject_DVD(self, n_transits, percent_rejected, nparam, self.meta['catalog_f2'])
if error_inflate:
# adjust the along scan errors so that the errors on the best fit parameters match the catalog.
self.along_scan_errs *= self.error_inflation_factor(n_transits, nparam, self.meta['catalog_f2'])
return header, data
@staticmethod
def error_inflation_factor(ntr, nparam, f2):
"""
:param ntr: int. Number of transits used in the catalog solution. I.e. this should be
N_transit_total - N_reject. So if N_reject is unknown, then the error inflation factor will be slightly wrong.
:param nparam: int. Number of parameters used in the solution (e.g. 5, 7, 9..)
:param f2: float. Goodness of fit metric. field F2 in the Hipparcos Re-reduction catalog.
:return: u. float.
The errors are to be scaled by u = sqrt(Q/nu) in equation B.4 of <NAME> et al. 2014.
(Title: Joint astrometric solution of Hipparcos and Gaia)
NOTE: ntr (the number of transits) given in the header of the Hip2 IAD, is not necessarily
the number of transits used in the actual solution.
"""
num_transits_used = ntr
nu = num_transits_used - nparam # equation B.1 of <NAME> et al. 2014
Q = nu * (np.sqrt(2/(9*nu))*f2 + 1 - 2/(9*nu))**3 # equation B.3
u = np.sqrt(Q/nu) # equation B.4. This is the square root of the reduced chi-squared of the fit.
return u
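# Quick numeric sketch (hypothetical inputs): with ntr=100 transits, nparam=5 and
# f2=2.5, nu = 95 and u comes out to roughly 1.18, i.e. the along-scan errors
# would be inflated by about 18 percent.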
def _reject_epochs(self, attr_to_set, value):
residuals_to_reject, orbits_to_reject = value['residual/along_scan_error'], value['orbit/scan_angle/time']
not_outlier = np.ones(len(self), dtype=bool)
np.put(not_outlier, residuals_to_reject, False)
self.residuals, self.along_scan_errs = self.residuals[not_outlier], self.along_scan_errs[not_outlier]
not_outlier = np.ones(len(self), dtype=bool)
np.put(not_outlier, orbits_to_reject, False)
self._epoch, self.scan_angle = self._epoch[not_outlier], self.scan_angle[not_outlier]
self.parallax_factors = self.parallax_factors[not_outlier]
setattr(self, attr_to_set, value)
@property
def additional_rejected_epochs(self):
return self._additional_rejected_epochs
@additional_rejected_epochs.setter
def additional_rejected_epochs(self, value):
self._reject_epochs('_additional_rejected_epochs', value)
@property
def rejected_epochs(self):
return self._rejected_epochs
@rejected_epochs.setter
def rejected_epochs(self, value):
self._reject_epochs('_rejected_epochs', value)
class HipparcosRereductionJavaTool(HipparcosRereductionDVDBook):
EPOCHREJECTLIST = Table.read(pkg_resources.resource_filename('htof',
'data/epoch_reject_shortlist.csv'), format='ascii')
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None,
along_scan_errs=None, meta=None):
super(HipparcosRereductionJavaTool, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals,
inverse_covariance_matrix=inverse_covariance_matrix,
meta=meta)
def read_header(self, star_id, intermediate_data_directory):
fpath = self.get_intermediate_data_file_path(star_id, intermediate_data_directory)
with open(fpath) as f:
lines = f.readlines()
hline_fst = [float(i) for i in lines[6].split('#')[1].split()]
hline_scd = [float(i) for i in lines[8].split('#')[1].split()]
hline_trd = [float(i) if not ('---' in i) else np.nan for i in lines[10].split('#')[1].split()]
hline_fst = {key: val for key, val in zip(['HIP', 'MCE', 'NRES', 'NC',
'isol_n', 'SCE', 'F2', 'F1'], hline_fst)}
hline_scd = {key: val for key, val in zip(['Hp','B-V','VarAnn','NOB','NR'], hline_scd)}
hline_trd = {key: val for key, val in zip(['RAdeg', 'DEdeg', 'Plx', 'pm_RA', 'pm_DE',
'e_RA', 'e_DE', 'e_Plx', 'e_pmRA', 'e_pmDE', 'dpmRA',
'dpmDE', 'e_dpmRA', 'e_dpmDE', 'ddpmRA', 'ddpmDE',
'e_ddpmRA', 'e_ddpmDE', 'upsRA', 'upsDE', 'e_upsRA',
'e_upsDE', 'var'], hline_trd)}
return {'first': hline_fst, 'second': hline_scd, 'third': hline_trd}
def parse(self, star_id, intermediate_data_directory, error_inflate=True, attempt_adhoc_rejection=True,
reject_known=True, **kwargs):
self.meta['star_id'] = star_id
header = self.read_header(star_id, intermediate_data_directory)
raw_data = self.read_intermediate_data_file(star_id, intermediate_data_directory,
skiprows=13, header=None, sep=r'\s+')
self.scan_angle = np.arctan2(raw_data[3], raw_data[4]) # data[3] = sin(theta) = cos(psi), data[4] = cos(theta) = sin(psi)
self._epoch = raw_data[1] + 1991.25
self.residuals = raw_data[5] # unit milli-arcseconds (mas)
self.along_scan_errs = raw_data[6] # unit milli-arcseconds (mas)
self.parallax_factors = raw_data[2]
self.meta['catalog_f2'] = header['first']['F2']
self.meta['catalog_soltype'] = header['first']['isol_n']
n_transits, n_expected_transits = header['first']['NRES'], header['second']['NOB']
n_additional_reject = int(n_transits) - int(n_expected_transits)
# self.meta['catalog_f2'] = header.iloc[0][6] # this is already set in HipparcosRereductionDVDBook.parse()
# self.meta['catalog_soltype'] = header.iloc[0][4] # this is already set in HipparcosRereductionDVDBook.parse()
max_n_auto_reject = 4
if attempt_adhoc_rejection:
if 3 >= n_additional_reject > 0:
self.additional_rejected_epochs = find_epochs_to_reject_java(self, n_additional_reject)
if max_n_auto_reject >= n_additional_reject > 3:
orbit_number = raw_data[0].values
self.additional_rejected_epochs = find_epochs_to_reject_java_large(self, n_additional_reject, orbit_number)
if n_additional_reject > max_n_auto_reject:
# These take too long to do automatically, pull the epochs to reject from the file that we computed
correct_id = header['first']['HIP']
t = self.EPOCHREJECTLIST[self.EPOCHREJECTLIST['hip_id'] == int(correct_id)]
if len(t) == 1:
self.additional_rejected_epochs = {'residual/along_scan_error': literal_eval(t['residual/along_scan_error'][0]),
'orbit/scan_angle/time': literal_eval(t['orbit/scan_angle/time'][0])}
else:
                    warnings.warn(f'Cannot fix {star_id}. It has more than {max_n_auto_reject} corrupted epochs, '
                                  f'which is too many to correct on-the-fly, and the correct epochs to reject '
                                  f'are not in our precomputed list (epoch_reject_shortlist.csv). This happens '
                                  f'for sources where it is computationally infeasible to find an ad-hoc '
                                  f'correction.', UserWarning)  # pragma: no cover
if not attempt_adhoc_rejection and n_additional_reject > 0:
warnings.warn(f"attempt_adhoc_rejection = False and {star_id} has {n_additional_reject} "
"discrepant observations. You have disabled the ad-hoc "
"correction for this Java tool source. The IAD do not correspond "
"to the best fit catalog solution. ", UserWarning)
epochs_to_reject = np.where(self.along_scan_errs <= 0)[0] # note that we have to reject
# the epochs with negative along scan errors (the formally known epochs that need to be rejected)
# AFTER we have done the bug correction (rejected the epochs from the write out bug). This order
# is important because the ad-hoc correction shuffles the orbits.
if len(epochs_to_reject) > 0 and reject_known:
# setting self.rejected_epochs also rejects the epochs (see the @setter)
self.rejected_epochs = {'residual/along_scan_error': list(epochs_to_reject),
'orbit/scan_angle/time': list(epochs_to_reject)}
# compute f2 of the residuals (with ad-hoc correction where applicable)
nparam = get_nparam(str(int(header['first']['isol_n'])))
Q = np.sum((self.residuals/self.along_scan_errs)**2)
n_transits_final = len(self)
# note that n_transits_final = n_expected_transits - number of indicated rejects (By negative AL errors)
self.meta['calculated_f2'] = special.erfcinv(stats.chi2.sf(Q, n_transits_final - nparam)*2)*np.sqrt(2)
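        # (The line above inverts the F2 definition: F2 is the Gaussian z-score whose
        # upper-tail probability equals the chi-squared survival probability of the
        # fit, F2 = sqrt(2)*erfcinv(2*sf(Q, dof)), so F2 ~ N(0, 1) for a good fit.)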
if error_inflate:
            # WARNING: we use the catalog (Van Leeuwen 2014 Java tool) F2 value here to calculate the error
            # inflation factor. This is because, for some sources, the calculated f2 value is much larger
            # than the catalog value. E.g., HIP 87275 has a catalog f2 of 65.29, while the f2 newly
            # calculated via chi2.sf is infinite. The error inflation from the catalog f2 is ~7, while the
            # error inflation assuming the new f2 would be infinite. We adopt the catalog f2 so as to
            # reproduce the catalog solution and errors. The developers have not yet found this f2
            # discrepancy to be an issue, but any source with it should still be treated with caution.
self.along_scan_errs *= self.error_inflation_factor(n_transits_final, nparam, self.meta['catalog_f2'])
return header, raw_data
class GaiaDR2(GaiaData):
DEAD_TIME_TABLE_NAME = pkg_resources.resource_filename('htof', 'data/astrometric_gaps_gaiadr2_08252020.csv')
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None, meta=None,
min_epoch=st.GaiaDR2_min_epoch, max_epoch=st.GaiaDR2_max_epoch, along_scan_errs=None):
super(GaiaDR2, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals,
inverse_covariance_matrix=inverse_covariance_matrix,
min_epoch=min_epoch, max_epoch=max_epoch, meta=meta)
class GaiaeDR3(GaiaData):
DEAD_TIME_TABLE_NAME = pkg_resources.resource_filename('htof', 'data/astrometric_gaps_gaiaedr3_12232020.csv')
def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None, meta=None,
min_epoch=st.GaiaeDR3_min_epoch, max_epoch=st.GaiaeDR3_max_epoch, along_scan_errs=None):
super(GaiaeDR3, self).__init__(scan_angle=scan_angle, along_scan_errs=along_scan_errs,
epoch=epoch, residuals=residuals,
inverse_covariance_matrix=inverse_covariance_matrix,
min_epoch=min_epoch, max_epoch=max_epoch, meta=meta)
def digits_only(x: str):
return re.sub("[^0-9]", "", x)
def match_filename(paths, star_id):
return [f for f in paths if digits_only(os.path.basename(f).split('.')[0]).zfill(6) == star_id.zfill(6)]
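# Example (illustrative): digits_only('HIP027321.d') returns '027321', so
# match_filename(paths, '27321') will match that file once both identifiers
# are zero-padded to six digits by zfill(6).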
def find_epochs_to_reject_DVD(data: DataParser, n_transits, percent_rejected, nparam, catalog_f2):
    # Looks for combinations of orbits within the DVD IAD that yield a stationary point of chi-squared.
    # Note that this does not work for sources with the data corruption.
chi2_thresh = 1
possible_rejects = np.arange(len(data))
min_n_reject = max(floor((percent_rejected - 1) / 100 * n_transits), 0)
max_n_reject = max(ceil((percent_rejected + 1) / 100 * n_transits), 1)
    max_n_reject = min(max_n_reject, 3)  # limit to three rejected epochs so that the combinatorics don't blow up.
# calculate the chisquared partials
sin_scan = np.sin(data.scan_angle.values)
cos_scan = np.cos(data.scan_angle.values)
dt = data.epoch - 1991.25
rows_to_keep = np.ones(len(data), dtype=bool)
orbit_factors = np.array([data.parallax_factors.values, sin_scan, cos_scan, dt * sin_scan, dt * cos_scan])
residual_factors = (data.residuals.values / data.along_scan_errs.values ** 2)
chi2_vector = (2 * residual_factors * orbit_factors).T
sum_chisquared_partials_norejects = np.sqrt(np.sum(np.sum(chi2_vector, axis=0) ** 2))
    # we should be able to do the orbit reject calculation fairly easily in memory.
    # for 100 choose 3 there are about 160,000 combinations of orbits -- we should be able to
    # do those in 10,000-combination chunks in memory and gain a substantial speed up.
candidate_row_rejects_pern = [[]]
candidate_row_chisquared_partials_pern = [sum_chisquared_partials_norejects]
n_reject = max(min_n_reject, 1)
while n_reject < max_n_reject:
candidate_row_rejects = []
candidate_row_chisquared_partials = []
combinations = list(set(itertools.combinations(possible_rejects, int(n_reject))))
for rows_to_reject in combinations:
rows_to_keep[list(rows_to_reject)] = False
            # sum the square of the chi2 partials to decide whether or not this combination is a stationary point.
sum_chisquared_partials = np.sqrt(np.sum(np.sum(chi2_vector[rows_to_keep], axis=0) ** 2))
candidate_row_rejects.append(rows_to_reject)
candidate_row_chisquared_partials.append(sum_chisquared_partials)
# reset for the next loop:
rows_to_keep[list(rows_to_reject)] = True
n_reject += 1
candidate_row_rejects_pern.append(np.array(candidate_row_rejects)[np.argmin(candidate_row_chisquared_partials)])
candidate_row_chisquared_partials_pern.append(np.min(candidate_row_chisquared_partials))
# see if any of the rejections are viable (i.e., check if this IAD is messed up in an unrepairable way)
if np.min(candidate_row_chisquared_partials_pern) > chi2_thresh:
        warnings.warn(f"Failed to find which observations of DVD source {data.meta['star_id']} "
                      f"should have been marked as rejected. "
                      f"The chi squared partials were larger than {chi2_thresh}. "
                      f"DVD source {data.meta['star_id']} is likely a source with corrupted data. "
                      f"Aborting the rejection routine and using the IAD as "
                      f"read from the DVD data. ", UserWarning)  # pragma: no cover
return {'residual/along_scan_error': [], 'orbit/scan_angle/time': []}
# exclude any rejections that do not yield stationary points.
    viable_rejections = np.where(np.array(candidate_row_chisquared_partials_pern) < chi2_thresh)[0]
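    # The original routine is truncated at this point; what follows is a minimal
    # sketch (an assumption, not htof's verbatim code): take the first viable
    # candidate (fewest rejected rows, since candidates are ordered by increasing
    # n_reject) and return it in the same rejection format used above.
    best = viable_rejections[0]
    rows = list(np.atleast_1d(candidate_row_rejects_pern[best]).astype(int))
    return {'residual/along_scan_error': rows, 'orbit/scan_angle/time': rows}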
# SPDX-License-Identifier: (BSD-3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python module for Time Domain Moment Tensor Inversion (tdmtpy), Version: 0.1
Last updated on February 11, 2020
@module: tdmtpy
@author: <NAME> (<EMAIL>)
Usage
1. From command line: tdmt [input_file]; if no input file is specified the code will look for the default input file "./mtinv.in"
2. Input file example (e.g. mtinv.in); check :class:`tdmtpy.Header` and :class:`tdmtpy.Station` for more details on the input parameters.
datetime: 2019-07-16T20:10:31.473
longitude: -121.757
latitude: 37.8187
data_dir: example/dc
green_dir: example/gf
greentype: herrmann
component: ZRT
depth: 10
degree: 5
weight: 1
plot: 1
correlate: 0
NAME DISTANCE AZIMUTH SHIFT NPTS DT USED(1/0) FILTER NC NP LCRN HCRN MODEL STLO STLA
BK.FARB.00 110 263 30 100 1.0 1 bp 2 2 0.05 0.1 gil7 -123.0011 37.69782
BK.SAO.00 120 167 30 150 1.0 1 bp 2 2 0.05 0.1 gil7 -121.44722 36.76403
BK.CMB.00 123 78 30 150 1.0 0 bp 2 2 0.05 0.1 gil7 -120.38651 38.03455
BK.MNRC.00 132 333 30 150 1.0 1 bp 2 2 0.05 0.1 gil7 -122.44277 38.87874
NC.AFD. 143 29 30 150 1.0 1 bp 2 2 0.05 0.1 gil7 -120.968971 38.94597
3. Data and Green's functions are in SAC binary format, and are corrected for instrument response,
filtered, and decimated. File name conventions are described below:
Data - [NAME].[COMPONENTS].dat
NAME = station name in inputfile
COMPONENTS = t, r, z
e.g. BK.CMB.00.z.dat, BK.CMB.00.t.dat
GFs - [NAME].[DEPTH].[GF_NAME]
NAME = station name in inputfile
DEPTH = source depth with four significant digits
COMPONENTS = t, r, z
GF_NAME = herrmann format has 10: tss tds rss rds rdd zss zds zdd rex zex, e.g. BK.CMB.00.10.0000.zds
tensor format has 18 (if using all three components): xx, yy, zz, xy, xz, yz, e.g. BK.CMB.00.10.0000.zxy
4. Two output files are created "mtinv.out" and "max.mtinv.out" after running the code.
mtinv.out = moment tensor depth search results, best solution on the second line (after header)
max.mtinv.out = best solution with the highest VR, includes additional station information
5. If plot = 1 code will generate figures (e.g. figure0.pdf, figure1.pdf, etc.) with beach balls and waveform fits plotted
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from obspy.core import read, Stream
from scipy.signal import fftconvolve
import sys
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib.collections as mpl_collections
from matplotlib import patches, transforms
from obspy.imaging.beachball import xy2patch
from obspy.imaging.scripts.mopad import BeachBall as mopad_BeachBall
from obspy.imaging.scripts.mopad import MomentTensor as mopad_MomentTensor
from obspy.imaging.scripts.mopad import epsilon
from math import pi, sqrt
def loadfile(path_and_file=None):
"""
Input text file containing all information needed to set :class:`tdmtpy.Header` and
:class:`tdmtpy.Station` attributes.
"""
if path_and_file is None:
fname = 'mtinv.in'
else:
fname = path_and_file
try:
with open(fname) as f: pass
except IOError:
raise IOError("Input file '%s' not found."%fname)
# `Header` names and corresponding types
parser = dict(datetime = str,
longitude = float,
latitude = float,
datadir = str,
greendir = str,
greentype = lambda x: x.upper(),
component = lambda x: x.lower(),
depth = _set_depth,
degree = int,
weight = int,
plot = int,
correlate = int)
# Read headers and define formats
with open(fname,'r') as f:
for key, parse in parser.items():
parser[key] = parse(next(f).split()[1])
next(f) # Skip Line 14
items = [ line.split() for line in f ] # Station-specific info
header = Header(parser)
# `Station` names and corresponding types
parser = dict(name = np.object_,
distance = np.float_,
azimuth = np.float_,
shift = np.int_,
npts = np.int_,
dt = np.float_,
used = np.object_,
filtertype = np.object_,
nc = np.int_,
np = np.int_,
lcrn = np.float_,
hcrn = np.float_,
model = np.object_,
stlo = np.float_,
stla = np.float_)
for (key, parse), col in zip(parser.items(),zip(*items)):
parser[key] = parse(col)
parser['name'] = parser['name'].astype('U')
parser['used'] = parser['used'].astype('U')
parser['filtertype'] = parser['filtertype'].astype('U')
parser['model'] = parser['model'].astype('U')
station = Station(parser,ncomp=len(header.component))
mt = TDMT(header=header,station=station)
return mt
def _set_depth(depth_str):
"""
Depth search based on minimum source depth, maximum source depth, and vertical spacing
    :param depth_str: a colon-delimited string depth_min:depth_max:spacing (e.g. 10:30:5) or a fixed depth
"""
depth = np.float_(depth_str.split(':'))
if depth.size == 3:
start = depth[0]
step = depth[2]
stop = depth[1] + step
depth_profile = [ z for z in np.arange(start,stop,step) ]
elif depth.size == 1:
depth_profile = depth
else: raise ValueError
return depth_profile
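# Example: _set_depth('10:30:5') returns [10.0, 15.0, 20.0, 25.0, 30.0], while
# _set_depth('10') returns a single fixed depth.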
class Header(object):
"""
Container for header information of tdmtpy TDMT object
Header object contains all header information of a :class:`tdmtpy.TDMT` object. These are
required for every TDMT object.
:param hdr: dict, optional
dict containing headers loaded from :func: `tdmtpy.loadfile`
.. rubric:: Attributes, defaults to None if not supplied. All attributes must be defined to run inversion.
datetime: string
Event origin time in ObsPy UTCDatetime calendar/ordinal date representation
(e.g. "2009-W53-7T12:23:34.5" or "2009-365T12:23:34.5" )
longitude: float
Event longitude in decimal degrees
latitude: float
Event latitude in decimal degrees
datadir: string
Path to data files
greendir: string
Path to green's function files
greentype: string
Green's function format. Supported formats are: "HERRMANN"-1D elementary seismograms,
"TENSOR"-1D or 3D Green's tensor.
component: string
        Data and Green's function components. Options are: "ZRT"-vertical, radial and transverse,
"Z"-vertical only.
depth: np.float
Fixed source depth or vertical depth profile on which the inverse is solved.
        Depth profile is constructed using the parameters specified in the input file.
        See :func:`_set_depth` for more details.
degree: int
Number of independent parameters in the inversion. Options are: 5-deviatoric inversion,
6-full inversion.
weight: int
        Data weighting function, same weights are applied to all components of a station.
Options are: 0-No weights applied, 1-Inverse distance weighting, 2-inverse variance weighting
plot: int
Whether or not to plot results. False/0-No, True/1-Yes
correlate: int
Whether or not to search for best time shifts between data and synthetics using cross-correlation.
False/0-No, True/1-Yes.
"""
def __init__(self,hdr=None):
if hdr is None:
self.datetime = None
self.longitude = None
self.latitude = None
self.datadir = None
self.greendir = None
self.greentype = None
self.component = None
self.depth = None
self.degree = None
self.weight = None
self.plot = False
self.time_search = 0
self.correlate = 0
else:
self._read_hdr(hdr)
def __str__(self):
f = "{0:>15}: {1}\n"
ret = ''.join([ f.format(key,str(getattr(self,key))) for key in vars(self)] )
return ret
def _read_hdr(self,hdr):
self.datetime = hdr['datetime']
self.longitude = hdr['longitude']
self.latitude = hdr['latitude']
self.datadir = hdr['datadir']
self.greendir = hdr['greendir']
self.greentype = hdr['greentype']
self.component = hdr['component']
self.depth = hdr['depth']
self.degree = hdr['degree']
self.weight = hdr['weight']
self.plot = hdr['plot']
self.correlate = hdr['correlate']
def _repr_pretty_(self, p, cycle):
p.text(str(self))
class Station(object):
"""
Container for station information of tdmtpy TDMT object
Station object contains all station information of a :class:`tdmtpy.TDMT` object. These are
required for every TDMT object.
:param stat: dict, optional
A dictionary containing station-specific parameters id (file name), distance, azimuth,
dt, npts, shift, used. See README on format.
:param ncomp: int, required
number of components
.. rubric:: Attributes, required unless marked as optional
nsta: int
Number of stations
ncomp: int
        Number of components
name: np.string
Station/file names
distance: np.float
Source-receiver distance
azimuth: np.float
Source-receiver azimuth
dt: np.float
Sampling interval
npts: np.int
Number of samples
shift: np.int
Time shift in samples
used: np.int
Invert data (1) or prediction only (0)
index1: np.int
Beginning of each component/trace in d = Gm
index2: np.int
End of each component/trace in d = Gm
"""
def __init__(self,stat=None,ncomp=None):
if stat is None:
self.nsta = None
self.ncomp = None
self.name = None
self.distance = None
self.azimuth = None
self.shift = None
self.npts = None
self.dt = None
self.used = None
self.index1 = None
self.index2 = None
else:
self._read_stat(stat,ncomp)
def __str__(self):
f = "{0:>10}: {1}\n"
ret = ''.join([ f.format(key,str(getattr(self,key))) for key in vars(self)] )
return ret
def _read_stat(self,stat,ncomp):
self.nsta = len(stat['name'])
self.ncomp = ncomp
self.name = stat['name']
self.distance = stat['distance']
self.azimuth = stat['azimuth']
self.shift = stat['shift']
self.npts = stat['npts']
self.dt = stat['dt']
self._set_used(stat['used'])
self._set_indices()
def _set_used(self,used):
"""
Determine which components to invert
"""
if self.ncomp == 1:
self.used = np.ones((self.nsta,1),dtype=np.int)
self.used.T[:] = used
else:
self.used = np.ones((self.nsta,3),dtype=np.int)
if np.any(np.char.rfind(used,',') != -1):
for i in range(self.nsta):
self.used[i,:] = used[i].split(',')
else:
self.used.T[:] = used
def _set_indices(self):
"""
Calculate indices of each component in inversion matrix (d and G)
"""
if self.npts is None or self.ncomp is None or self.nsta is None:
print('Missing station information.')
else:
index2 = np.cumsum(np.repeat(self.npts,self.ncomp), dtype=np.int)
index1 = np.zeros(self.ncomp * self.nsta, dtype=np.int)
index1[1::] = index2[0:-1]
self.index1 = index1.reshape(self.nsta,self.ncomp)
self.index2 = index2.reshape(self.nsta,self.ncomp)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def xcorr(arr1,arr2,normalize=True):
"""
Compute cross-correlation coefficient
"""
c = fftconvolve(arr2[::-1],arr1,mode='valid')
if normalize:
norm = (np.sum(arr1**2)*np.sum(arr2**2))**0.5
if norm <= np.finfo(float).eps: # norm is zero
c[:] = 0
elif c.dtype == float:
c /= norm
else:
c = c/norm
return c
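# Example (illustrative): with equal-length inputs the 'valid' correlation has a
# single lag, and a nonzero signal correlated with itself gives exactly 1:
#   a = np.sin(np.linspace(0, 4*np.pi, 200))
#   xcorr(a, a)  # -> array([1.])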
def invert(d,G,w,greentype,degree,plot,nsta,used,index1,index2):
# d = data
a = invert4a(d,G,w)
M = a2M(a,greentype,degree)
# Variance reduction
Gm = np.dot(G,a)
dGm = w*(d-Gm)**2
dd = w*d**2
VR = (1-np.sum(dGm)/np.sum(dd))*100
# Station VR
used = np.sum(used,axis=1)
ind1 = index1[:,0]
ind2 = index2[:,-1]
staVR = [ (1-np.sum(dGm[b:e])/np.sum(dd[b:e]))*100
if yes else None for yes, b, e in zip(used,ind1,ind2) ]
out = {}
out.update(decompose(M,plot))
if plot:
out['a'] = a # a coefficients to compute synthetics
out['VR'] = VR
out['staVR'] = staVR
# Display outputs
iso = {5:'Deviatoric',6:'Full'}
print('\n%s Moment Tensor Inversion'%iso[degree])
print('Mw = %.2f'%out['mw'])
print('Percent DC/CLVD/ISO = %d/%d/%d'%(out['pdc'],out['pclvd'],out['piso']))
print('VR = %.2f%%'%out['VR'])
return out
def gaussj(A,b):
"""
Gaussian-Jordan elimination and back substitution
"""
# Make a copy to avoid altering input
x = np.copy(b)
n = len(x)
# Gaussian-Jordan elimination
for k in range(0,n-1):
for i in range(k+1,n):
if A[i,k] != 0.0:
lam = A[i,k]/A[k,k]
A[i,k+1:n] = A[i,k+1:n] - lam*A[k,k+1:n]
x[i] = x[i]-lam*x[k]
# Back substitution
for k in range(n-1,-1,-1):
x[k] = (x[k]-np.dot(A[k,k+1:n],x[k+1:n]))/A[k,k]
return x
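# Example: solving the 2x2 system 2x + y = 3, x + 3y = 5 (note that gaussj
# modifies A in place during elimination, so pass a copy if A is reused):
#   A = np.array([[2., 1.], [1., 3.]]); b = np.array([3., 5.])
#   gaussj(A, b)  # -> array([0.8, 1.4])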
def invert4a(d,G,w):
# Perform linear least-squares inversion
# coefficients to elementary moment tensors
    d = d[w!=0]
    G = G[w!=0,:] # keep only the rows with nonzero weight
    w = w[w!=0]
Gt = (G).transpose()
Gtd = np.dot(Gt,np.dot(np.diag(w),d))
GtG = np.dot(Gt,np.dot(np.diag(w),G))
# Compute the inverse GtGinv
a = gaussj(GtG,Gtd)
return a
def a2M(a,greentype,degree):
# aij coefficients (weights to elementary moment tensors) to moment tensor elements
M = [ None for _ in range(6) ]
# Ordering Mxx, Myy, Mzz, Mxy, Mxz, Myz
if greentype == 'TENSOR':
M[3] = a[0]
M[4] = a[3]
M[5] = a[2]
if degree == 6:
M[0] = a[1] - a[4] + a[5]
M[1] = -a[1] + a[5]
M[2] = a[4] + a[5]
elif degree == 5:
M[0] = a[1] - a[4]
M[1] = -a[1]
M[2] = a[4]
else: pass
if greentype == 'HERRMANN':
M[0] = a[0]
M[1] = a[3]
M[3] = a[1]
M[4] = a[2]
M[5] = a[4]
if degree == 6:
M[2] = a[5]
elif degree == 5:
M[2] = -(a[0]+a[3])
else: pass
return M
def find_strike_rake_dip(u,n):
"""
Compute strike,rake and dip
"""
# Inputs: u = slip vector, n = fault normal
dip = np.arccos(-1*u[2])*180/np.pi
strike = np.arcsin(-1*u[0]/np.sqrt(u[0]**2+u[1]**2))*180/np.pi
# Determine the quadrant
if u[1] < 0:
strike = 180-strike
rake = np.arcsin(-1*n[2]/np.sin(dip*np.pi/180))*180/np.pi
cos_rake = n[0]*np.cos(strike*np.pi/180)+n[1]*np.sin(strike*np.pi/180)
if cos_rake < 0:
rake = 180-rake
if strike < 0:
strike = strike+360
if rake < -180:
rake = rake+360
if rake > 180:
rake = rake-360
return (strike,rake,dip)
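# Worked example of the formulas above: a slip vector u = [1, 0, 0] with fault
# normal n = [0, 1, 0] gives dip = 90, strike = 270 and rake = 180, i.e. a
# vertical strike-slip mechanism. Note the return order is (strike, rake, dip).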
def find_fault_planes(M,M_dc):
eigVal, eigVec = np.linalg.eig(M_dc)
# sort in ascending order:
dc_eigvec = np.real(np.take(eigVec,np.argsort(eigVal),1))
# Principal axes:
#n = dc_eigvec[:,1]
p = dc_eigvec[:,2]
t = dc_eigvec[:,0]
# str/rake/dip for plane-1
u1 = (1/np.sqrt(2))*(t+p) # slip vector
n1 = (1/np.sqrt(2))*(p-t) # fault normal
# u,n calculations from Vavrycuk (2015) angles.m
if u1[2] > 0: # vertical component is always negative!
u1 = -1*u1
if (np.dot(np.dot(u1.T,M),n1)+np.dot(np.dot(n1.T,M),u1)) < 0:
n1 = -1*n1
str1,rake1,dip1 = find_strike_rake_dip(u1,n1)
# str/rake/dip for plane-2
u2 = n1
n2 = u1
if u2[2] > 0: # vertical component is always negative!
u2 = -1*u2
n2 = -1*n2
str2,rake2,dip2 = find_strike_rake_dip(u2,n2)
#null_axis = dc_eigvec[:,1]
#t_axis = t
#p_axis = p
return ( [(str1,dip1,rake1),(str2,dip2,rake2)] )
def eigen2lune(lam):
"""
Convert eigenvalues to source-type parameters on a lune (Tape and Tape, GJI 2012)
    :param lam: eigenvalues in descending order
"""
lambda_mag = sqrt(lam[0]**2 + lam[1]**2 + lam[2]**2)
if np.sum(lam) != 0:
bdot = np.sum(lam)/(sqrt(3)*lambda_mag)
if bdot > 1:
bdot = 1
elif bdot < -1:
bdot = -1
#bdot[bdot>1] = 1
#bdot[bdot<-1] = -1
delta = 90 - np.arccos(bdot)*180/pi
else:
delta = 0.
if lam[0] == lam[2]:
gamma = 0
else:
gamma = np.arctan((-lam[0] + 2*lam[1] - lam[2]) / (sqrt(3)*(lam[0] - lam[2])))*180/pi
return ([gamma,delta])
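# Worked examples of the formulas above: a pure double couple with eigenvalues
# (1, 0, -1) maps to the lune origin (gamma, delta) = (0, 0); a pure explosion
# (1, 1, 1) maps to the top of the lune, (gamma, delta) = (0, 90).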
def decompose(m,plot=False):
M = np.array([[m[0],m[3],m[4]],
[m[3],m[1],m[5]],
[m[4],m[5],m[2]]])
M *= 1e20 # dyne-cm
# Isotropic part:
M_iso = np.diag(np.array([1. / 3 * np.trace(M),
1. / 3 * np.trace(M),
1. / 3 * np.trace(M)]))
miso = 1. / 3 * np.trace(M)
# Deviatoric part:
M_dev = M - M_iso
# compute eigenvalues and -vectors:
eigVal, _ = np.linalg.eig(M)
eigenvalues = np.real(np.take(eigVal,np.argsort(eigVal)[::-1]))
# deviatoric eigenvalues and -vectors
eigVal, eigVec = np.linalg.eig(M_dev)
# sort in absolute value ascending order:
dev_eigval = np.real(np.take(eigVal,np.argsort(abs(eigVal))))
dev_eigvec = np.real(np.take(eigVec,np.argsort(abs(eigVal)),1))
# Jost and Herrmann, 1989 definition of eigenvalues:
m1 = dev_eigval[0]
# m2 = dev_eigval[1]
m3 = dev_eigval[2] # deviatoric moment
if m3 == 0.: # isotropic only
F = 0.5
else:
F = -1*m1/m3
# Construct Dyadic Description of Vector Dipoles:
a3a3 = np.column_stack([dev_eigvec[:,2],dev_eigvec[:,2],dev_eigvec[:,2]])
a3a3 = a3a3*a3a3.T
a2a2 = np.column_stack([dev_eigvec[:,1],dev_eigvec[:,1],dev_eigvec[:,1]])
a2a2 = a2a2*a2a2.T
a1a1 = np.column_stack([dev_eigvec[:,0],dev_eigvec[:,0],dev_eigvec[:,0]])
a1a1 = a1a1*a1a1.T
M_clvd = m3*F*(2*a3a3-a2a2-a1a1) # CLVD tensor
mclvd = abs(m3)*abs(F)*2
M_dc = m3*(1-2*F)*(a3a3-a2a2) # DC tensor
mdc = abs(m3)*abs(1-2*F)
# Bowers and Hudson moment
mo = abs(miso)+abs(m3) # iso moment + dev moment
mw = (np.log10(mo)-16.05)*2/3
mw_dev = 2*np.log10(mo)/3-10.73
# Calculate percentage
#piso = int(round(abs(miso)/mo*100,6))
#pdc = int(round((1-2*abs(F))*(1-piso/100.)*100,6))
#pclvd = int(100 - piso - pdc)
piso = abs(miso)/mo*100
pdc = mdc/mo*100
pclvd = mclvd/mo*100
# DC Fault planes
fps = find_fault_planes(M,M_dc)
# Find gamma and delta, Tape&Tape lune parameters
lune = eigen2lune(eigenvalues)
res = {'M':[ M[0,0],M[1,1],M[2,2],M[0,1],M[0,2],M[1,2] ],
'eigenvalues':eigenvalues, 'mo':mo, 'mw':mw, 'mw_dev':mw_dev,
'miso':miso, 'mdc':mdc, 'mclvd': mclvd,
'pdc':pdc, 'pclvd':pclvd,'piso':piso, 'fps':fps, 'lune':lune}
if plot:
DEV = [ M_dev[0,0],M_dev[1,1],M_dev[2,2],M_dev[0,1],M_dev[0,2],M_dev[1,2] ]
ISO = [ M_iso[0,0],M_iso[1,1],M_iso[2,2],M_iso[0,1],M_iso[0,2],M_iso[1,2] ]
CLVD = [ M_clvd[0,0],M_clvd[1,1],M_clvd[2,2],M_clvd[0,1],M_clvd[0,2],M_clvd[1,2] ]
res.update({'DEV':DEV, 'ISO':ISO, 'CLVD':CLVD})
return res
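# Example (worked from the decomposition above): a pure explosion
# m = [1, 1, 1, 0, 0, 0] gives mo = 1e20 dyne-cm, piso = 100, pdc = pclvd = 0,
# and mw = (log10(1e20) - 16.05)*2/3 ~ 2.63.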
def new_page(nsta,nrows,ncols,annot='',offset=2,figsize=(10,8)):
gs = GridSpec(nrows+offset,ncols,hspace=0.5,wspace=0.1)
f = plt.figure(figsize=figsize)
# Annotations and beach balls
ax0 = f.add_subplot(gs[0:offset,:],xlim=(-5,3.55),ylim=(-0.75,0.6),aspect='equal')
ax0.text(-5,0,annot,fontsize=11,verticalalignment='center')
ax0.set_axis_off()
# Waveforms
ax1 = np.empty((nsta,3),dtype=np.object) # create empty axes
for i in range(nsta):
ax1[i,0] = f.add_subplot(gs[i+offset,0])
ax1[i,1] = f.add_subplot(gs[i+offset,1])
ax1[i,2] = f.add_subplot(gs[i+offset,2])
# Adjust axes
for i in range(nsta-1):
adjust_spines(ax1[i,0],['left','bottom'])
for j in range(1,3):
adjust_spines(ax1[i,j],[])
adjust_spines(ax1[-1,0],['left','bottom'])
adjust_spines(ax1[-1,1],['bottom'])
adjust_spines(ax1[-1,2],['bottom'])
# Title
ax1[0,0].set_title('Vertical',verticalalignment='bottom',fontsize=10)
ax1[0,1].set_title('Radial',verticalalignment='bottom',fontsize=10)
ax1[0,2].set_title('Tangential',verticalalignment='bottom',fontsize=10)
return (f,ax0,ax1)
def adjust_spines(ax,spines):
ax.tick_params(direction='in',labelsize=8)
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward',5)) # outward by 5 points
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('time (s)')
else:
ax.xaxis.set_ticks([])
def beach(fm, linewidth=1, facecolor='0.75', bgcolor='w', edgecolor='k',
alpha=1.0, xy=(0, 0), width=200, size=100, nofill=False,
zorder=100, mopad_basis='NED', axes=None, show_iso=False):
"""
    Function adapted from obspy.imaging.mopad_wrapper, with a minor modification
    to include isotropic components; the original source code only handles the purely isotropic case.
"""
# initialize beachball
mt = mopad_MomentTensor(fm, system=mopad_basis)
bb = mopad_BeachBall(mt, npoints=size)
## Snippets added by <NAME>
if show_iso:
bb._plot_isotropic_part = True
bb._nodallines_in_NED_system()
##
bb._setup_BB(unit_circle=False)
# extract the coordinates and colors of the lines
radius = width / 2.0
neg_nodalline = bb._nodalline_negative_final_US
pos_nodalline = bb._nodalline_positive_final_US
tension_colour = facecolor
pressure_colour = bgcolor
if nofill:
tension_colour = 'none'
pressure_colour = 'none'
# based on mopads _setup_plot_US() function
# collect patches for the selection
coll = [None, None, None]
coll[0] = patches.Circle(xy, radius=radius)
coll[1] = xy2patch(neg_nodalline[0, :], neg_nodalline[1, :], radius, xy)
coll[2] = xy2patch(pos_nodalline[0, :], pos_nodalline[1, :], radius, xy)
# set the color of the three parts
fc = [None, None, None]
if bb._plot_clr_order > 0:
fc[0] = pressure_colour
fc[1] = tension_colour
fc[2] = tension_colour
if bb._plot_curve_in_curve != 0:
fc[0] = tension_colour
if bb._plot_curve_in_curve < 1:
fc[1] = pressure_colour
fc[2] = tension_colour
else:
coll = [coll[i] for i in (0, 2, 1)]
fc[1] = pressure_colour
fc[2] = tension_colour
else:
fc[0] = tension_colour
fc[1] = pressure_colour
fc[2] = pressure_colour
if bb._plot_curve_in_curve != 0:
fc[0] = pressure_colour
if bb._plot_curve_in_curve < 1:
fc[1] = tension_colour
fc[2] = pressure_colour
else:
coll = [coll[i] for i in (0, 2, 1)]
fc[1] = tension_colour
fc[2] = pressure_colour
if bb._pure_isotropic:
if abs(np.trace(bb._M)) > epsilon:
# use the circle as the most upper layer
coll = [coll[0]]
if bb._plot_clr_order < 0:
fc = [tension_colour]
else:
fc = [pressure_colour]
# transform the patches to a path collection and set
# the appropriate attributes
collection = mpl_collections.PatchCollection(coll, match_original=False)
collection.set_facecolors(fc)
# Use the given axes to maintain the aspect ratio of beachballs on figure
# resize.
if axes is not None:
# This is what holds the aspect ratio (but breaks the positioning)
collection.set_transform(transforms.IdentityTransform())
# Next is a dirty hack to fix the positioning:
# 1. Need to bring the all patches to the origin (0, 0).
for p in collection._paths:
p.vertices -= xy
# 2. Then use the offset property of the collection to position the
# patches
collection.set_offsets(xy)
collection._transOffset = axes.transData
collection.set_edgecolors(edgecolor)
collection.set_alpha(alpha)
collection.set_linewidth(linewidth)
collection.set_zorder(zorder)
return collection
class TDMT(object):
"""
Class for time domain moment tensor inversion, the necessary attributes need to be defined
by reading the input parameter file (e.g. mtinv.in)
"""
def __init__(self,header=None,station=None):
self.header = header
self.station = station
if self.header is not None and self.station is not None:
self._read_green_library() # Read Green's functions (synthetics)
data = self._read_data() # Read data
if self.header.correlate:
self._find_time_shift(data)
d = [ self._data2d(data,shift) for shift in self.shift ]
else:
d = self._data2d(data,self.station.shift)
d = [ d for _ in self.header.depth ] # data array for inversion
self.shift = [ self.station.shift for _ in self.header.depth ]
self.d = d
self._calculate_weights()
else:
print('No headers are defined.')
def run(self):
# Run inversion
self._run_inversion()
# Find solution with maximum VR
val = [ r.get('VR') for r in self.solutions ]
self.best = val.index(max(val)) # Store index of best solution
self.station.shift = self.shift[self.best] # update sample shift
self.d = self.d[self.best] # update data vector
if self.header.weight == 2: # update weights if using inverse variance
self.stawei = self.stawei[self.best]
else:
self.stawei = self.stawei[0]
def write(self):
# Write solutions to files
self._write_solutions2list() # depth search results, best solution with max VR on top
self._write_best2text() # write station-specific results for best solution
if self.header.plot:
self.plot()
def plot(self):
# Plot waveform fits and focal mechanisms of best solution
solution = self.solutions[self.best]
Gm = np.dot(self.G[self.best],solution['a'])
stavr = [ '%.0f'%val if val else '' for val in solution['staVR'] ]
#time = self.npts*self.dt
annot = '\n'.join(['Depth = %s km'%self.header.depth[self.best],
'Mo = %.2E dyne-cm'%solution['mo'],
'Mw = %.2f'%solution['mw'],
'Percent DC/CLVD/ISO = %d/%d/%d'%(solution['pdc'],solution['pclvd'],solution['piso']),
'VR = %.2f%%'%solution['VR']])
        # Beach balls (TODO: turn this block into a function)
if self.header.degree == 5:
fm = (solution['M'], solution['fps'][0], solution['CLVD'])
width = (1, 0.01*solution['pdc'],0.01*solution['pclvd'])
fmtxt = ('Deviatoric','DC','CLVD')
fmx = (1-0.01*solution['pdc']*0.25, 2.5-0.01*solution['pclvd']*0.5)
elif self.header.degree == 6:
fm = (solution['M'],solution['DEV'],solution['ISO'])
width = (1, 0.01*(solution['pdc']+solution['pclvd']), 0.01*solution['piso'])
fmtxt = ('Full','Deviatoric','ISO')
fmx = (1-0.01*(solution['pdc']+solution['pclvd'])*0.25, 2.5-0.01*solution['piso']*0.5)
nrows = 6
ncols = 3
a = self.station.nsta/nrows
nPages = int(a) + ((int(a) - a) !=0 )
lst = list(range(0,self.station.nsta,nrows))
lst.append(self.station.nsta)
pages = (range(lst[i],lst[i+1]) for i in range(nPages))
x = 0.55*np.sin(self.station.azimuth*np.pi/180)
y = 0.55*np.cos(self.station.azimuth*np.pi/180)
syntcol = np.empty(self.station.used.shape,dtype='<U5')
syntcol[self.station.used==1] = 'green'
syntcol[self.station.used==0] = '0.5'
datacol = np.empty(self.station.used.shape,dtype='<U5')
datacol[self.station.used==1] = 'black'
datacol[self.station.used==0] = '0.5'
for page, group in enumerate(pages):
#tmax = np.max(time[group])
f, ax0, ax1 = new_page(len(group),nrows+1,ncols,annot=annot) #+1 for beach ball
ax0.text(fmx[0],0,'=',horizontalalignment='center',verticalalignment='center')
ax0.text(fmx[1],0,'+',horizontalalignment='center',verticalalignment='center')
for i in range(self.station.nsta):
if np.sum(self.station.used[i,:]):
ax0.plot(x[i],y[i],marker=(3,0,-self.station.azimuth[i]),color='green',zorder=101)
else:
ax0.plot(x[i],y[i],marker=(3,0,-self.station.azimuth[i]),color='0.5',zorder=101)
for i in range(len(fm)):
beach1 = beach(fm[i],xy=(i+0.5*i,0),width=width[i],show_iso=True)
ax0.add_collection(beach1)
ax0.text(i+0.5*i,0.55,fmtxt[i],horizontalalignment='center')
for i, stat in enumerate(group):
t = np.arange(0,self.station.npts[stat]*self.station.dt[stat],self.station.dt[stat])
data = np.reshape(self.d[self.station.index1[stat,0]:self.station.index2[stat,-1]],
(self.station.ncomp,self.station.npts[stat]))
synt = np.reshape(Gm[self.station.index1[stat,0]:self.station.index2[stat,-1]],
(self.station.ncomp,self.station.npts[stat]))
ymin = np.min([data,synt])
ymax = np.max([data,synt])
for j in range(self.station.ncomp):
ax1[i,j].plot(t,data[j,:],color=datacol[stat,j])
ax1[i,j].plot(t,synt[j,:],color=syntcol[stat,j],dashes=[6,2])
ax1[i,j].set_ylim(ymin,ymax)
ax1[i,j].set_xlim(0,t[-1])
ax1[i,0].set_yticks([ymin,0,ymax])
ax1[i,0].set_yticklabels(['%.2e'%ymin, '0', '%.2e'%ymax])
ax1[i,0].text(0,ymax,
self.station.name[stat],
fontsize=10,fontweight='bold',verticalalignment='top')
ax1[i,1].text(0,ymin,
r'($\mathit{r},\varphi$)=(%-.2f,%-.0f)'%(
self.station.distance[stat],self.station.azimuth[stat]),
fontsize=8,verticalalignment='bottom')
ax1[i,2].text(0,ymin,'%d,%s'%(
self.station.shift[stat],stavr[stat]),fontsize=8,verticalalignment='bottom')
f.savefig('figure%d.pdf'%(page))
plt.close(f)
def _read_green_library(self,precision=4):
options = {'HERRMANN': ['_green_herrmann',('ss','ds','dd','ex')],
'TENSOR': ['_green_tensor',('xx','xy','xz','yy','yz','zz')]}
greenlist = [ ''.join([c,suffix]) for suffix in options[self.header.greentype][1]
for c in self.header.component ]
G = [ None for _ in self.header.depth ]
green = [ None for _ in self.header.depth ]
for i, depth in enumerate(self.header.depth):
call_green_method = getattr(self,options[self.header.greentype][0])
gg = call_green_method(depth,greenlist,precision)
G[i] = gg[0]
if self.header.correlate:
green[i] = gg[1]
self.G = G
self.green = green
def _green_tensor(self,depth,greenlist,precision):
depth_str = '{:.{prec}f}'.format(depth,prec=precision)
gg = dict.fromkeys(greenlist,None)
G = np.zeros((np.sum(self.station.ncomp*self.station.npts),self.header.degree),dtype=np.float)
v = [ getattr(self.station,k) for k in ('name','npts','index1','index2') ]
for stat,npts,b,e in zip(*v):
file = '%s/%s.%s'%(self.header.greendir,stat,depth_str)
for suffix in greenlist:
gg[suffix] = read('%s.%s'%(file,suffix))[0].data[0:npts]
# Read vertical then (if 3-C) horizontals
            # Construct the six basis functions (elementary moment tensors)
            # Reference: Kikuchi and Kanamori, 1991 (BSSA)
for c,ii,jj in zip(self.header.component,b,e):
G[ii:jj,0] = gg[c+'xy']
G[ii:jj,1] = gg[c+'xx'] - gg[c+'yy']
G[ii:jj,2] = gg[c+'yz']
G[ii:jj,3] = gg[c+'xz']
G[ii:jj,4] = -gg[c+'xx'] + gg[c+'zz']
if self.header.degree == 6:
G[ii:jj,5] = gg[c+'xx'] + gg[c+'yy'] + gg[c+'zz']
return (G, None)
def _green_herrmann(self,depth,greenlist,precision):
"""
Load all station Green's functions
"""
depth_str = '{:.{prec}f}'.format(depth,prec=precision)
gg = dict.fromkeys(greenlist,None)
green = np.zeros((np.sum(self.station.ncomp*self.station.npts),self.header.degree-2),dtype=np.float)
G = np.zeros((np.sum(self.station.ncomp*self.station.npts),self.header.degree),dtype=np.float)
v = [ getattr(self.station,k) for k in ('name','npts','azimuth','index1','index2') ]
for stat,npts,azimuth,b,e in zip(*v):
file = '%s/%s.%s'%(self.header.greendir,stat,depth_str)
alpha = azimuth*(pi/180)
for suffix in greenlist:
if suffix == 'tdd' or suffix == 'tex':
gg[suffix] = np.zeros((npts,))
else:
gg[suffix] = read('%s.%s'%(file,suffix))[0].data[0:npts]
            # Construct the Green's function vector using equations 6, 7 and 8 from Minson and Dreger, 2008 (GJI).
            # Some signs are flipped to match the sign convention of the basis Green's functions from R. B. Herrmann, 2002,
            # Appendix B of Computer Programs in Seismology.
            # http://www.eas.slu.edu/eqc/eqccps.html
# Vertical components
G[b[0]:e[0],1] = gg['zss']*np.sin(2*alpha) # mxy
G[b[0]:e[0],2] = gg['zds']*np.cos(alpha) # mxz
G[b[0]:e[0],4] = gg['zds']*np.sin(alpha) # myz
if self.header.degree == 5:
G[b[0]:e[0],0] = 0.5*gg['zss']*np.cos(2*alpha) - 0.5*gg['zdd'] # mxx
G[b[0]:e[0],3] = -0.5*gg['zss']*np.cos(2*alpha) - 0.5*gg['zdd'] # myy
elif self.header.degree == 6:
G[b[0]:e[0],0] = ( 0.5*gg['zss']*np.cos(2*alpha) - 0.166667*gg['zdd'] +
0.33333*gg['zex'] ) # mxx
G[b[0]:e[0],3] = ( -0.5*gg['zss']*np.cos(2*alpha) - 0.166667*gg['zdd'] +
0.33333*gg['zex'] ) # myy
G[b[0]:e[0],5] = 0.33333*gg['zdd'] + 0.33333*gg['zex'] # mzz
# Read horizontals
if self.header.component == 'zrt':
G[b[2]:e[2],1] = -gg['tss']*np.cos(2*alpha) # mxy
G[b[1]:e[1],1] = gg['rss']*np.sin(2*alpha)
G[b[2]:e[2],2] = gg['tds']*np.sin(alpha) # mxz
G[b[1]:e[1],2] = gg['rds']*np.cos(alpha)
G[b[2]:e[2],4] = -gg['tds']*np.cos(alpha) # myz
G[b[1]:e[1],4] = gg['rds']*np.sin(alpha)
G[b[2]:e[2],0] = 0.5*gg['tss']*np.sin(2*alpha) # mxx
G[b[2]:e[2],3] = -0.5*gg['tss']*np.sin(2*alpha) # myy
if self.header.degree == 5:
                    G[b[1]:e[1],0] = 0.5*gg['rss']*np.cos(2*alpha) - 0.5*gg['rdd'] # mxx
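                    # (The sample is truncated here; by symmetry with the vertical
                    # component terms above, the radial myy term would plausibly be:)
                    G[b[1]:e[1],3] = -0.5*gg['rss']*np.cos(2*alpha) - 0.5*gg['rdd'] # myy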
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import scipy.sparse as sp
from scipy.special import binom
import yaml
import copy
import warnings
import os
import time
from .Hamiltonians import DisplacedAnharmonicOscillator, PolymerVibrations, Polymer, DiagonalizeHamiltonian, LadderOperators
from .general_Liouvillian_classes import LiouvillianConstructor
class OpenPolymer(Polymer,LiouvillianConstructor):
def __init__(self,site_energies,site_couplings,dipoles):
"""Extends Polymer object to an open systems framework,
using the Lindblad formalism to describe bath coupling
"""
super().__init__(site_energies,site_couplings,dipoles)
# Values that need to be set
self.optical_dephasing_gamma = 0
self.optical_relaxation_gamma = 0
self.site_to_site_dephasing_gamma = 0
self.site_to_site_relaxation_gamma = 0
self.exciton_relaxation_gamma = 0
self.exciton_exciton_dephasing_gamma = 0
self.kT = 0
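    # The dissipation builders below assemble Lindblad terms. Each call to
    # make_Lindblad_instructions(gamma, O) is assumed (it is inherited from
    # LiouvillianConstructor, which is not shown here) to encode the standard
    # dissipator D[O]rho = gamma*(O rho O^dag - (1/2){O^dag O, rho}).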
def optical_dephasing_operator(self):
total_deph = self.occupied_list[0].copy()
for i in range(1,len(self.occupied_list)):
total_deph += self.occupied_list[i]
return total_deph
def optical_dephasing_instructions(self):
O = self.optical_dephasing_operator()
gamma = self.optical_dephasing_gamma
return self.make_Lindblad_instructions(gamma,O)
def optical_dephasing_Liouvillian(self):
instructions = self.optical_dephasing_instructions()
return self.make_Liouvillian(instructions)
def boltzmann_factors(self,E1,E2):
if E1 == E2:
return 0.5,0.5
if E1 < E2:
return self.boltzmann_factors_ordered_inputs(E1,E2)
else:
E1_to_E2, E2_to_E1 = self.boltzmann_factors_ordered_inputs(E2,E1)
return E2_to_E1, E1_to_E2
def boltzmann_factors_ordered_inputs(self,E1,E2):
"""E1 must be less than E2"""
if self.kT == 0:
return 1, 0
Z = np.exp(-E1/self.kT) + np.exp(-E2/self.kT)
if np.isclose(Z,0):
E2_to_E1 = 1
E1_to_E2 = 0
else:
E2_to_E1 = np.exp(-E1/self.kT)/Z
E1_to_E2 = np.exp(-E2/self.kT)/Z
return E2_to_E1, E1_to_E2
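    # Worked example of the formulas above: for E1 = 0 and E2 = kT,
    # Z = 1 + exp(-1), so E2_to_E1 = 1/Z ~ 0.731 and E1_to_E2 = exp(-1)/Z ~ 0.269;
    # downward (E2 -> E1) transfer dominates, consistent with detailed balance.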
def optical_relaxation_instructions(self):
eg = 0
ins_list = []
gamma = self.optical_relaxation_gamma
for n in range(len(self.energies)):
en = self.energies[n]
bg, bn = self.boltzmann_factors(eg,en)
O = self.up_list[n]
instructions2 = self.make_Lindblad_instructions(gamma * bg,O.T)
ins_list += instructions2
if np.isclose(bn,0):
pass
else:
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
ins_list += instructions1
return ins_list
def optical_relaxation_Liouvillian(self):
inst_list = self.optical_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_relaxation_instructions(self):
nm = itertools.combinations(range(len(self.energies)),2)
i = 0
ins_list = []
gamma = self.site_to_site_relaxation_gamma
for n,m in nm:
en = self.energies[n]
em = self.energies[m]
bn,bm = self.boltzmann_factors(en,em)
O = self.exchange_list[i]
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
instructions2 = self.make_Lindblad_instructions(gamma * bm,O.T)
ins_list += instructions1
ins_list += instructions2
i+=1
return ins_list
def site_to_site_relaxation_Liouvillian(self):
inst_list = self.site_to_site_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_dephasing_operator_list(self):
s_deph_list = []
for (i,j) in itertools.combinations(range(self.num_sites),2):
s_deph_list.append(self.occupied_list[i] - self.occupied_list[j])
return s_deph_list
def all_site_dephasing_instructions(self):
s_deph_list = self.site_to_site_dephasing_operator_list()
Lindblad_instruction_list = []
gamma = self.site_to_site_dephasing_gamma
for O in s_deph_list:
Lindblad_instruction_list += self.make_Lindblad_instructions(gamma,O)
return Lindblad_instruction_list
def all_site_dephasing_Liouvillian(self):
inst_list = self.all_site_dephasing_instructions()
L = self.make_Liouvillian(inst_list)
return L/(2*self.num_sites)
def set_electronic_dissipation_instructions(self):
inst_list = []
if self.optical_dephasing_gamma != 0:
inst_list += self.optical_dephasing_instructions()
if self.site_to_site_dephasing_gamma != 0:
inst_list += self.all_site_dephasing_instructions()
if self.site_to_site_relaxation_gamma != 0:
inst_list += self.site_to_site_relaxation_instructions()
if self.optical_relaxation_gamma != 0:
inst_list += self.optical_relaxation_instructions()
self.electronic_dissipation_instructions = inst_list
def make_manifold_hamiltonian_instructions(self,ket_manifold,bra_manifold):
Hket = self.get_electronic_hamiltonian(manifold_num = ket_manifold)
Hbra = self.get_electronic_hamiltonian(manifold_num = bra_manifold)
return self.make_commutator_instructions2(-1j*Hket,-1j*Hbra)
def make_total_Liouvillian(self):
drho = self.make_Liouvillian(self.make_manifold_hamiltonian_instructions('all','all'))
if self.num_sites > 1:
drho += self.all_exciton_dephasing_Liouvillian()
drho += self.exciton_relaxation_Liouvillian()
# drho += self.optical_relaxation_Liouvillian()
drho += self.optical_dephasing_Liouvillian()
self.L = drho
def eigfun(self,L,*,check_eigenvectors = True,invert = True,populations_only = False):
eigvals, eigvecs = np.linalg.eig(L)
eigvals = np.round(eigvals,12)
sort_indices = eigvals.argsort()
eigvals.sort()
eigvecs = eigvecs[:,sort_indices]
for i in range(eigvals.size):
max_index = np.argmax(np.abs(eigvecs[:,i]))
if np.real(eigvecs[max_index,i]) < 0:
eigvecs[:,i] *= -1
if eigvals[i] == 0:
# eigenvalues of 0 correspond to thermal distributions,
# which should have unit trace in the Hamiltonian space
if populations_only:
trace_norm = eigvecs[:,i].sum()
eigvecs[:,i] = eigvecs[:,i] / trace_norm
else:
shape = int(np.sqrt(eigvals.size))
trace_norm = eigvecs[:,i].reshape(shape,shape).trace()
if np.isclose(trace_norm,0):
pass
else:
eigvecs[:,i] = eigvecs[:,i] / trace_norm
if invert:
eigvecs_left = np.linalg.pinv(eigvecs)
else:
eigvals_left, eigvecs_left = np.linalg.eig(L.T)
eigvals_left = np.round(eigvals_left,12)
sort_indices_left = eigvals_left.argsort()
eigvals_left.sort()
eigvecs_left = eigvecs_left[:,sort_indices_left]
eigvecs_left = eigvecs_left.T
for i in range(eigvals_left.size):
norm = np.dot(eigvecs_left[i,:],eigvecs[:,i])
eigvecs_left[i,:] *= 1/norm
if check_eigenvectors:
LV = L.dot(eigvecs)
D = eigvecs_left.dot(LV)
if np.allclose(D,np.diag(eigvals),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))
self.eigenvalues = eigvals
self.eigenvectors = {'left':eigvecs_left,'right':eigvecs}
return eigvals, eigvecs, eigvecs_left
def save_L(self,dirname):
save_npz(os.path.join(dirname,'L.npz'),csr_matrix(self.L))
def save_L_by_manifold(self):
np.savez(os.path.join(self.base_path,'L.npz'),**self.L_by_manifold)
def save_eigsystem(self,dirname):
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds = self.eigenvectors['right'])
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds = self.eigenvectors['left'])
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds = self.eigenvalues)
def save_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu.shape[0])
mu_ket = np.kron(self.mu,II.T)
mu_bra = np.kron(II,self.mu.T)
mu_mask_tol = 10
mu_ket_t = np.dot(np.dot(evl,mu_ket),ev)
mu_ket_3d = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_ket_3d[:,:,0] = mu_ket_t
mu_bra_t = np.dot(np.dot(evl,mu_bra),ev)
mu_bra_3d = np.zeros((mu_bra_t.shape[0],mu_bra_t.shape[0],3),dtype='complex')
mu_bra_3d[:,:,0] = mu_bra_t
if mask:
ket_mask = np.zeros(mu_ket_t.shape,dtype='bool')
ket_mask[:,:] = np.round(mu_ket_t,mu_mask_tol)[:,:]
mu_ket_t_masked = mu_ket_t * ket_mask
mu_ket_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_ket_3d_masked[:,:,0] = mu_ket_t_masked
bra_mask = np.zeros(mu_bra_t.shape,dtype='bool')
bra_mask[:,:] = np.round(mu_bra_t,mu_mask_tol)[:,:]
mu_bra_t_masked = mu_bra_t * bra_mask
mu_bra_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_bra_3d_masked[:,:,0] = mu_bra_t_masked
np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
np.savez(os.path.join(dirname,'mu_boolean.npz'),ket=ket_mask,bra=bra_mask)
np.savez(os.path.join(dirname,'mu_pruned.npz'),ket=mu_ket_3d_masked,bra=mu_bra_3d_masked)
else:
np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = np.kron(self.mu_ket_up,II.T)
mu_ket_down = np.kron(self.mu_ket_up.T,II.T)
mu_bra_up = np.kron(II,self.mu_ket_up)
mu_bra_down = np.kron(II,self.mu_ket_up.T)
mu_mask_tol = 10
mu_ket_up_t = np.dot(np.dot(evl,mu_ket_up),ev)
mu_ket_up_3d = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d[:,:,0] = mu_ket_up_t
mu_bra_up_t = np.dot(np.dot(evl,mu_bra_up),ev)
mu_bra_up_3d = np.zeros((mu_bra_up_t.shape[0],mu_bra_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d[:,:,0] = mu_bra_up_t
mu_ket_down_t = np.dot(np.dot(evl,mu_ket_down),ev)
mu_ket_down_3d = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d[:,:,0] = mu_ket_down_t
mu_bra_down_t = np.dot(np.dot(evl,mu_bra_down),ev)
mu_bra_down_3d = np.zeros((mu_bra_down_t.shape[0],mu_bra_down_t.shape[0],3),dtype='complex')
mu_bra_down_3d[:,:,0] = mu_bra_down_t
if mask:
ket_up_mask = np.zeros(mu_ket_up_t.shape,dtype='bool')
ket_up_mask[:,:] = np.round(mu_ket_up_t,mu_mask_tol)[:,:]
mu_ket_up_t_masked = mu_ket_up_t * ket_up_mask
mu_ket_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d_masked[:,:,0] = mu_ket_up_t_masked
bra_up_mask = np.zeros(mu_bra_up_t.shape,dtype='bool')
bra_up_mask[:,:] = np.round(mu_bra_up_t,mu_mask_tol)[:,:]
mu_bra_up_t_masked = mu_bra_up_t * bra_up_mask
mu_bra_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d_masked[:,:,0] = mu_bra_up_t_masked
ket_down_mask = np.zeros(mu_ket_down_t.shape,dtype='bool')
ket_down_mask[:,:] = np.round(mu_ket_down_t,mu_mask_tol)[:,:]
mu_ket_down_t_masked = mu_ket_down_t * ket_down_mask
mu_ket_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d_masked[:,:,0] = mu_ket_down_t_masked
bra_down_mask = np.zeros(mu_bra_down_t.shape,dtype='bool')
bra_down_mask[:,:] = np.round(mu_bra_down_t,mu_mask_tol)[:,:]
mu_bra_down_t_masked = mu_bra_down_t * bra_down_mask
mu_bra_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_bra_down_3d_masked[:,:,0] = mu_bra_down_t_masked
np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
np.savez(os.path.join(dirname,'mu_boolean.npz'),ket_up=ket_up_mask,bra_up=bra_up_mask,
ket_down=ket_down_mask,bra_down=bra_down_mask)
np.savez(os.path.join(dirname,'mu_pruned.npz'),ket_up=mu_ket_up_3d_masked,
bra_up=mu_bra_up_3d_masked,ket_down=mu_ket_down_3d_masked,
bra_down=mu_bra_down_3d_masked)
else:
np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu_site_basis(self,dirname):
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = np.kron(self.mu_ket_up,II.T)
mu_ket_down = np.kron(self.mu_ket_up.T,II.T)
mu_bra_up = np.kron(II,self.mu_ket_up)
mu_bra_down = np.kron(II,self.mu_ket_up.T)
mu_mask_tol = 10
mu_ket_up_3d = np.zeros((mu_ket_up.shape[0],mu_ket_up.shape[0],3),dtype='complex')
mu_ket_up_3d[:,:,0] = mu_ket_up
mu_bra_up_3d = np.zeros((mu_bra_up.shape[0],mu_bra_up.shape[0],3),dtype='complex')
mu_bra_up_3d[:,:,0] = mu_bra_up
mu_ket_down_3d = np.zeros((mu_ket_down.shape[0],mu_ket_down.shape[0],3),dtype='complex')
mu_ket_down_3d[:,:,0] = mu_ket_down
mu_bra_down_3d = np.zeros((mu_bra_down.shape[0],mu_bra_down.shape[0],3),dtype='complex')
mu_bra_down_3d[:,:,0] = mu_bra_down
np.savez(os.path.join(dirname,'mu_site_basis.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
class OpenPolymerVibrations(OpenPolymer):
def __init__(self,yaml_file,*,mask_by_occupation_num=True,force_detailed_balance=False,for_RKE=False):
"""Initial set-up is the same as for the Polymer class, but I also need
        to unpack the vibrational_frequencies, which must be passed as a nested list:
        each site a, b, ... may have N vibrational modes, and each mode has a
        frequency, a displacement and a frequency shift for the excited state.
"""
with open(yaml_file) as yamlstream:
params = yaml.load(yamlstream,Loader=yaml.SafeLoader)
self.base_path = os.path.split(yaml_file)[0]
self.save_path = os.path.join(self.base_path,'open')
os.makedirs(self.save_path,exist_ok=True)
super().__init__(params['site_energies'],params['site_couplings'],np.array(params['dipoles']))
self.H_diagonalization_time = 0
self.L_diagonalization_time = 0
self.L_construction_time = 0
self.truncation_size = params['initial truncation size']
        try:
            self.maximum_manifold = params['maximum_manifold']
        except KeyError:
            self.maximum_manifold = np.inf
self.maximum_manifold = min(self.maximum_manifold,self.num_sites)
self.params = params
self.set_bath_coupling()
if self.optical_relaxation_gamma != 0:
self.manifolds_separable = False
else:
self.manifolds_separable = True
self.set_electronic_dissipation_instructions()
self.occupation_num_mask = mask_by_occupation_num
self.set_vibrations()
self.set_vibrational_ladder_operators()
e_ham = self.extract_electronic_subspace(self.electronic_hamiltonian,0,self.maximum_manifold)
self.total_hamiltonian = np.kron(e_ham,self.vibrational_identity)
self.add_vibrations()
t0 = time.time()
self.set_H_eigsystem_by_manifold()
self.H_diagonalization_time = time.time() - t0
self.make_condon_mu()
self.make_condon_mu_dict()
if force_detailed_balance:
H_eigentransform = True
t0 = time.time()
self.all_instructions = self.make_commutator_instructions(-1j*self.total_hamiltonian)
self.set_L_by_manifold(H_eigentransform=H_eigentransform,add_eigenstate_relaxation_effects = False)
self.add_eigenstate_relaxation_effects()
self.add_eigenstate_optical_dephasing_effects()
self.L_construction_time = time.time() - t0
else:
H_eigentransform = False
t0 = time.time()
self.all_instructions = self.convert_electronic_instructions_to_full_instructions(self.electronic_dissipation_instructions)
self.all_instructions += self.make_commutator_instructions(-1j*self.total_hamiltonian)
self.all_instructions += self.vibrational_dissipation_instructions()
if self.manifolds_separable:
self.set_L_by_manifold(H_eigentransform=H_eigentransform)
else:
self.set_L()
self.L_construction_time = time.time() - t0
if for_RKE:
self.set_mu_by_manifold(H_eigentransform=H_eigentransform,L_eigentransform=False)
self.save_mu_by_manifold(pruned=False)
self.save_L_by_manifold()
self.save_rho0(H_eigentransform=H_eigentransform)
else:
t0 = time.time()
if self.manifolds_separable:
self.set_eigensystem_by_manifold(force_detailed_balance = force_detailed_balance)
self.set_mu_by_manifold(H_eigentransform=H_eigentransform)
self.save_mu_by_manifold(pruned=True)
self.save_eigensystem_by_manifold()
self.L_diagonalization_time = time.time() - t0
else:
self.set_eigensystem()
# self.set_mu()
# self.save_mu(pruned=True)
# self.save_eigensystem()
# self.L_diagonalization_time = time.time() - t0
self.save_timings()
def save_timings(self):
save_dict = {'H_diagonalization_time':self.H_diagonalization_time,
'L_diagonalization_time':self.L_diagonalization_time,
'L_construction_time':self.L_construction_time}
np.savez(os.path.join(self.save_path,'Liouvillian_timings.npz'),**save_dict)
def set_H_eigsystem_by_manifold(self):
self.H_eigenvalues = []
self.H_eigenvectors = []
for i in range(self.maximum_manifold+1):
e,v = np.linalg.eigh(self.extract_vibronic_manifold(self.total_hamiltonian,i))
            for j in range(e.size):
                max_ind = np.argmax(np.abs(v[:,j]))
                if v[max_ind,j] < 0:
                    v[:,j] = v[:,j] * -1
self.H_eigenvalues.append(e)
self.H_eigenvectors.append(v)
def save_rho0(self,*,H_eigentransform=False):
H_size = self.H_eigenvalues[0].size
if H_size == 1:
rho0 = np.array([[1]])
elif self.kT == 0:
rho0 = np.zeros((H_size,H_size))
rho0[0,0] = 1
else:
Z = np.sum(np.exp(-self.H_eigenvalues[0]/self.kT))
rho0_diag = np.exp(-self.H_eigenvalues[0]/self.kT)/Z
rho0 = np.diag(rho0_diag)
if H_eigentransform:
# Already in the eigenbasis
pass
else:
# Go back to original basis
v = self.H_eigenvectors[0]
rho0 = v.dot(rho0.dot(v.T))
rho0 = rho0.flatten()
np.save(os.path.join(self.base_path,'rho0.npy'),rho0)
def save_L(self):
save_npz(os.path.join(self.save_path,'L.npz'),csr_matrix(self.L))
def save_L_by_manifold(self):
np.savez(os.path.join(self.save_path,'L.npz'),**self.L_by_manifold)
def eigfun2(self,ket_manifold_num,bra_manifold_num,*,check_eigenvectors = True):
key = str(ket_manifold_num) + str(bra_manifold_num)
L = self.L_by_manifold[key]
E = L.diagonal().copy()
V = np.eye(E.size,dtype='complex')
VL = V.copy()
if ket_manifold_num == bra_manifold_num:
size = self.H_eigenvalues[ket_manifold_num].size
pop_inds = np.arange(size)*(size+1)
L_pop = L[pop_inds,:]
L_pop = L_pop[:,pop_inds]
e, v, vl = self.eigfun(L_pop,populations_only=True)
E[pop_inds] = e[:]
for i,j in zip(pop_inds,range(len(pop_inds))):
V[pop_inds,i] = v[:,j]
VL[pop_inds,i] = vl[:,j]
if check_eigenvectors:
LV = L.dot(V)
D = VL.dot(LV)
if np.allclose(D,np.diag(E),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(E)))))
self.eigenvalues = E
self.eigenvectors = {'left':VL,'right':V}
return E,V,VL
def vibrational_occupation_to_indices(self,vibration,occ_num,manifold_num):
single_mode_occ = np.arange(self.truncation_size)
vib_occ = self.vibrational_vector_of_ones_kron(vibration,single_mode_occ)
masked_single_mode_occ = vib_occ[self.vibrational_mask]
electronic_manifold_hamiltonian = self.get_electronic_hamiltonian(manifold_num = manifold_num)
elec_size = electronic_manifold_hamiltonian.shape[0]
masked_single_mode_occ = np.kron(np.ones(elec_size),masked_single_mode_occ)
return np.where(masked_single_mode_occ == occ_num)[0]
def electronic_occupation_to_indices(self,site_num,manifold_num):
single_mode_occ = np.arange(2)
elec_occ = self.electronic_vector_of_ones_kron(site_num,single_mode_occ)
mask = self.electronic_manifold_mask(manifold_num)
masked_elec_occ = elec_occ[mask]
masked_elec_occ = np.kron(masked_elec_occ,np.ones(self.vibrational_mask[0].size))
return np.where(masked_elec_occ == 1)[0]
def get_vibrational_relaxation_rates(self,manifold_num):
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
for n in range(self.num_vibrations):
if j > i:
rates[i,j] += self.single_vibrational_relaxation_rate(i,j,n,manifold_num)
return rates
def single_vibrational_relaxation_rate(self,i,j,vibration,manifold_num):
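        # eigenbasis one-phonon decay rate for mode `vibration`: the harmonic
        # ladder factor sqrt(k+1) is weighted by the overlaps of eigenvectors
        # i and j with the vibrational occupation-number states k and k+1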
vi = self.H_eigenvectors[manifold_num][:,i]
vj = self.H_eigenvectors[manifold_num][:,j]
rate = 0
for k in range(self.truncation_size):
k_inds = self.vibrational_occupation_to_indices(vibration,k,manifold_num)
kp1_inds = self.vibrational_occupation_to_indices(vibration,k+1,manifold_num)
for k_ind,kp1_ind in zip(k_inds,kp1_inds):
rate = rate + np.abs(vi[k_ind])**2 * np.abs(vj[kp1_ind])**2*np.sqrt(k+1)
return rate
def get_electronic_relaxation_rates(self,a,b,manifold_num):
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
if j > i:
rates[i,j] += self.single_electronic_relaxation_rate(i,j,a,b,manifold_num)
return rates
def get_all_electronic_relaxation_rates(self,manifold_num):
"""Treats all sites as having the same relaxation rates
"""
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
if j > i:
for a in range(len(self.energies)):
Ea = self.energies[a]
for b in range(len(self.energies)):
Eb = self.energies[b]
if Eb > Ea:
rates[i,j] += self.single_electronic_relaxation_rate(i,j,a,b,manifold_num)
return rates
def get_all_relaxation_rates(self,manifold_num):
rates = self.vibrational_gamma * self.get_vibrational_relaxation_rates(manifold_num)
rates = rates + self.site_to_site_relaxation_gamma * self.get_all_electronic_relaxation_rates(manifold_num)
return rates
def all_eigenstate_relaxation_instructions_by_manifold(self,manifold_num):
rates = self.get_all_relaxation_rates(manifold_num)
E = self.H_eigenvalues[manifold_num]
ins = []
for i in range(rates.shape[0]):
for j in range(rates.shape[1]):
if j > i:
O = np.zeros(rates.shape)
O[i,j] = 1
down, up = self.boltzmann_factors(E[i],E[j])
down = down * rates[i,j]
up = up * rates[i,j]
ins += self.make_Lindblad_instructions(down,O)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions(up,O.T)
return ins
def all_eigenstate_relaxation_instructions_by_coherence(self,ket_manifold_num,bra_manifold_num):
if ket_manifold_num == bra_manifold_num:
return self.all_eigenstate_relaxation_instructions_by_manifold(ket_manifold_num)
ket_rates = self.get_all_relaxation_rates(ket_manifold_num)
E_ket = self.H_eigenvalues[ket_manifold_num]
bra_rates = self.get_all_relaxation_rates(bra_manifold_num)
E_bra = self.H_eigenvalues[bra_manifold_num]
ins = []
Obra = np.zeros(bra_rates.shape)
for i in range(ket_rates.shape[0]):
for j in range(ket_rates.shape[1]):
if j > i:
Oket = np.zeros(ket_rates.shape)
Oket[i,j] = 1
down,up = self.boltzmann_factors(E_ket[i],E_ket[j])
down = down * ket_rates[i,j]
up = up * ket_rates[i,j]
ins += self.make_Lindblad_instructions2_Obra0(down,Oket,Obra)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions2_Obra0(up,Oket.T,Obra)
Oket = np.zeros(ket_rates.shape)
for i in range(bra_rates.shape[0]):
for j in range(bra_rates.shape[1]):
if j > i:
Obra = np.zeros(bra_rates.shape)
Obra[i,j] = 1
down,up = self.boltzmann_factors(E_bra[i],E_bra[j])
down = down * bra_rates[i,j]
up = up * bra_rates[i,j]
ins += self.make_Lindblad_instructions2_Oket0(down,Oket,Obra)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions2_Oket0(up,Oket,Obra.T)
return ins
def single_electronic_relaxation_rate(self,i,j,a,b,manifold_num):
vi = self.H_eigenvectors[manifold_num][:,i]
vj = self.H_eigenvectors[manifold_num][:,j]
a_inds = self.electronic_occupation_to_indices(a,manifold_num)
b_inds = self.electronic_occupation_to_indices(b,manifold_num)
rate = np.sum(np.abs(vi[a_inds])**2) * np.sum(np.abs(vj[b_inds])**2)
return rate
def make_eigenstate_relaxation_Lindblad_all_rates(self,rates,manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
E = self.H_eigenvalues[manifold_num]
size = E.size
pop_inds = np.arange(size)*(size+1)
pop_subspace = np.zeros((pop_inds.size,pop_inds.size))
L_diagonal = np.zeros((size,size))
for i in range(size):
for j in range(size):
if j > i:
down,up = self.boltzmann_factors(E[i],E[j])
down = down * rates[i,j]
up = up * rates[i,j]
pop_subspace[j,j] += -0.5*down
pop_subspace[i,j] += 0.5*down
pop_subspace[i,i] += -0.5*up
pop_subspace[j,i] += 0.5*up
L_diagonal[j,:] += -0.25*down
L_diagonal[:,j] += -0.25*down
L_diagonal[j,j] += -0.5*down
L_diagonal[i,:] += -0.25*up
L_diagonal[:,i] += -0.25*up
L_diagonal[i,i] += -0.5*up
L_total = np.diag(L_diagonal.ravel())
for i,j in zip(pop_inds,np.arange(pop_inds.size)):
L_total[i,pop_inds] = pop_subspace[j,:]
return L_total
def make_eigenstate_relaxation_Lindblad_all_rates_by_coherence(self,ket_rates,bra_rates,ket_manifold_num,bra_manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
if ket_manifold_num == bra_manifold_num:
return self.make_eigenstate_relaxation_Lindblad_all_rates(ket_rates,ket_manifold_num)
E_ket = self.H_eigenvalues[ket_manifold_num]
E_bra = self.H_eigenvalues[bra_manifold_num]
ket_size = E_ket.size
bra_size = E_bra.size
L_diagonal = np.zeros((ket_size,bra_size))
for i in range(ket_size):
for j in range(ket_size):
if j > i:
down,up = self.boltzmann_factors(E_ket[i],E_ket[j])
down = down * ket_rates[i,j]
up = up * ket_rates[i,j]
L_diagonal[j,:] += -0.25*down
L_diagonal[i,:] += -0.25*up
for i in range(bra_size):
for j in range(bra_size):
if j > i:
down,up = self.boltzmann_factors(E_bra[i],E_bra[j])
down = down * bra_rates[i,j]
                    up = up * bra_rates[i,j]
L_diagonal[:,j] += -0.25*down
L_diagonal[:,i] += -0.25*up
L_total = np.diag(L_diagonal.ravel())
return L_total
def add_eigenstate_relaxation_effects(self):
for k in range(self.maximum_manifold+1):
rates_k = self.get_all_relaxation_rates(k)
for l in range(self.maximum_manifold+1):
rates_l = self.get_all_relaxation_rates(l)
key = str(k) + str(l)
L = self.L_by_manifold[key]
L += self.make_eigenstate_relaxation_Lindblad_all_rates_by_coherence(rates_k,rates_l,k,l)
def add_eigenstate_optical_dephasing_effects(self):
for k in range(self.maximum_manifold+1):
for l in range(self.maximum_manifold+1):
if k == l:
pass
else:
key = str(k) + str(l)
L = self.L_by_manifold[key]
L += self.make_eigenstate_optical_dephasing_Lindblad(k,l)
def make_eigenstate_relaxation_Lindblad(self,gamma,i,j,manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
size = self.H_eigenvalues[manifold_num].size
pop_inds = np.arange(size)*(size+1)
pop_subspace = np.zeros((pop_inds.size,pop_inds.size))
pop_subspace[j,j] = -0.5
pop_subspace[i,j] = 0.5
L_diagonal = np.zeros((size,size))
L_diagonal[j,:] = -0.25
L_diagonal[:,j] = -0.25
L_diagonal[j,j] = -0.5
L_total = np.diag(L_diagonal.ravel())
        for m,n in zip(pop_inds,np.arange(pop_inds.size)):
            L_total[m,pop_inds] = pop_subspace[n,:]
return gamma*L_total
def make_eigenstate_relaxation_Lindblad_optical_coherence(self,gamma,i,j,ket_manifold_num,bra_manifold_num,*,
relaxation_in_ket = True):
"""From j to i. Factor of 0.25 matches my previous definition of Lindblad formalism"""
ket_size = self.H_eigenvalues[ket_manifold_num].size
bra_size = self.H_eigenvalues[bra_manifold_num].size
L_diagonal = np.zeros((ket_size,bra_size))
if relaxation_in_ket:
L_diagonal[j,:] = -0.25
else:
L_diagonal[:,j] = -0.25
L_total = np.diag(L_diagonal.ravel())
return gamma*L_total
def make_eigenstate_optical_dephasing_Lindblad(self,ket_manifold_num,bra_manifold_num):
"""Use a constant dephasing rate for all states: my best idea is to
        create the dephasing Lindblad for the electronic space only, and use it to
fill in a single rate on the diagonal of the Liouvillian. The trick is to get
dephasing between the nth and n+kth manifold right, when k > 1 (k = 1 is simply
gamma)"""
opt_deph = self.optical_dephasing_Liouvillian().diagonal().reshape(self.electronic_hamiltonian.shape)
opt_deph = self.extract_coherence(opt_deph,ket_manifold_num,bra_manifold_num).ravel()
if np.allclose(opt_deph[0],opt_deph):
pass
else:
raise Exception('All optical dephasing rates are not the same, unknown error')
ket_size = self.H_eigenvalues[ket_manifold_num].size
bra_size = self.H_eigenvalues[bra_manifold_num].size
        opt_deph = np.ones((ket_size,bra_size),dtype='complex') * opt_deph[0]
        # assumed completion: broadcast the single dephasing rate onto the
        # diagonal of this optical-coherence block, matching the shape
        # convention of make_eigenstate_relaxation_Lindblad_optical_coherence
        return np.diag(opt_deph.ravel())
import cv2
import numpy as np
# Skin detection: return an image containing only the skin-colored pixels
def get_skin_hsv(img):
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
back = np.zeros(img.shape, np.uint8)
(h0, s0, v0) = cv2.split(hsv_img)
(x, y) = h0.shape
for i in range(x):
for j in range(y):
if (h0[i][j] > 0) and (h0[i][j] < 40) and (s0[i][j] > 60) and (s0[i][j] < 255) and (v0[i][j] > 70) and (v0[i][j] < 255):
back[i][j] = img[i][j]
img[i][j] = 0
return back
def get_skin_yuv(img):
ycrcb_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
(y, cr, cb) = cv2.split(ycrcb_img)
(x, y) = cr.shape
back = np.zeros(img.shape, np.uint8)
for i in range(x):
for j in range(y):
if (cr[i][j] > 144.5) and (cr[i][j] < 200) and (cb[i][j] > 67) and (cb[i][j] < 136):
back[i][j]=img[i][j]
img[i][j]=0
return back
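# A vectorized alternative (a sketch, not a drop-in replacement): it applies
# the same Cr/Cb thresholds as get_skin_yuv above (rounded to inclusive
# integer bounds) and returns the skin image without also zeroing the skin
# pixels of the input in place.
def get_skin_yuv_vectorized(img):
    ycrcb_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    # binary mask from the threshold ranges (the Y channel is unconstrained)
    mask = cv2.inRange(ycrcb_img, (0, 145, 68), (255, 199, 135))
    # keep only the pixels flagged as skin
    return cv2.bitwise_and(img, img, mask=mask)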
# Boost contrast using histogram normalization
def imageHistNormalization(I):
    '''
    Histogram normalization:
    map the pixel values between Imin and Imax onto the range Omin to Omax, i.e. 0-255.
    The interval [Imin, Imax] is necessarily contained in [Omin, Omax],
    so no clipping is needed after normalization.
    '''
    Imax = np.max(I)
    Imin = np.min(I)
    # assumed completion, following the docstring: linear map onto [0, 255]
    Omin, Omax = 0, 255
    O = (I - Imin) / max(Imax - Imin, 1) * (Omax - Omin) + Omin
    return O.astype(np.uint8)
from typing import Union
from sklearn.neighbors import NearestNeighbors
from scipy.sparse import csr_matrix
import numpy as np
from scipy.stats import mode
from anndata import AnnData
import pandas as pd
from .utils import vecfld_from_adata
from ..preprocessing.utils import pca_monocle
from ..tools.clustering import (
hdbscan,
leiden,
louvain,
infomap,
)
from ..tools.Markov import (
velocity_on_grid,
grid_velocity_filter,
prepare_velocity_grid_data,
)
from ..dynamo_logger import main_info
from ..utils import LoggerManager, copy_adata
from .scVectorField import SvcVectorField
def cluster_field(
adata,
basis="pca",
features=["speed", "potential", "divergence", "acceleration", "curvature", "curl"],
add_embedding_basis=True,
embedding_basis=None,
normalize=False,
method="leiden",
cores=1,
copy=False,
**kwargs,
):
"""Cluster cells based on vector field features.
    We would like to see whether the vector field can be used to better define cell states/types. This can be assessed
    by characterizing critical points (attractor/saddle/repeller, etc.) and characteristic curves (nullcline,
    separatrix). However, calculating those is not easy; for example, a strict definition of an attractor is a
    state where the velocity is 0 and all eigenvalues of the Jacobian matrix at that point are negative. Under this
    strict definition, we may sometimes find that the attractors are very far away from our sampled cell states, which
    makes them less meaningful, although this can be largely avoided when we decide to remove the density correction
    during the velocity projection. This is not unexpected, as the vector field we learned is defined via a set of
    basis functions based on gaussian kernels, and it is thus hard to satisfy that strict definition.
    Fortunately, we can handle this better with the help of a different set of ideas. Instead of locating critical
    points with classical dynamical-systems methods, we can use machine learning approaches that extract
    geometric features of streamlines to "cluster vector field space" for defining cell states/types. This requires
    calculating potential (ordered pseudotime), speed, curliness, divergence, acceleration, curvature, etc. Thanks to
    the fact that we can analytically calculate the Jacobian matrix, those quantities of the vector field function
    can be conveniently and efficiently computed.
Parameters
----------
adata: :class:`~anndata.AnnData`.
adata object that includes both newly synthesized and total gene expression of cells. Alternatively,
the object should include both unspliced and spliced gene expression of cells.
    basis: `str` (default: `pca`)
        The space that will be used for calculating vector field features. Valid names include, for example, `pca`,
        `umap`, etc.
embedding_basis: `str` or None (default: `None`)
The embedding basis that will be combined with the vector field feature space for clustering.
normalize: `bool` (default: `False`)
Whether to mean center and scale the feature across all cells.
    method: `str` (default: `leiden`)
        The method that will be used for clustering, one of `{'kmeans', 'hdbscan', 'louvain', 'leiden', 'infomap'}`.
        If `louvain` or `leiden` is used, you need to have `cdlib` installed.
cores: `int` (default: 1)
The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
copy:
Whether to return a new deep copy of `adata` instead of updating `adata` object passed in arguments.
kwargs:
Any additional arguments that will be passed to either kmeans, hdbscan, louvain or leiden clustering algorithms.
    Returns
    -------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with the cluster labels stored in `.obs` is returned if `copy` is True;
        otherwise the passed-in `adata` is updated in place and None is returned.
    """
logger = LoggerManager.gen_logger("dynamo-cluster_field")
logger.log_time()
adata = copy_adata(adata) if copy else adata
if method in ["louvain", "leiden"]:
try:
from cdlib import algorithms
"leiden" in dir(algorithms)
except ImportError:
raise ImportError(
"You need to install the excellent package `cdlib` if you want to use louvain or leiden "
"for clustering."
)
features = list(
set(features).intersection(["speed", "potential", "divergence", "acceleration", "curvature", "curl"])
)
if len(features) < 1:
raise ValueError(
"features has to be selected from ['speed', 'potential', 'divergence', 'acceleration', "
f"'curvature', 'curl']. your feature is {features}"
)
feature_key = [
"speed_" + basis,
basis + "_ddhodge_potential",
"divergence_" + basis,
"acceleration_" + basis,
"curvature_" + basis,
"curl_" + basis,
]
feature_list = [i + "_" + basis if i != "potential" else basis + "_ddhodge_" + i for i in features]
if feature_key[0] not in adata.obs.keys() and feature_key[0] in feature_list:
from ..vectorfield import speed
speed(adata, basis=basis)
if feature_key[1] not in adata.obs.keys() and feature_key[1] in feature_list:
from ..ext import ddhodge
ddhodge(adata, basis=basis)
if feature_key[2] not in adata.obs.keys() and feature_key[2] in feature_list:
from ..vectorfield import divergence
divergence(adata, basis=basis)
if feature_key[3] not in adata.obs.keys() and feature_key[3] in feature_list:
from ..vectorfield import acceleration
acceleration(adata, basis=basis)
if feature_key[4] not in adata.obs.keys() and feature_key[4] in feature_list:
from ..vectorfield import curvature
curvature(adata, basis=basis)
if feature_key[5] not in adata.obs.keys() and feature_key[5] in feature_list:
from ..vectorfield import curl
curl(adata, basis=basis)
feature_data = adata.obs.loc[:, feature_list].values
if embedding_basis is None:
embedding_basis = basis
if add_embedding_basis:
X = np.hstack((feature_data, adata.obsm["X_" + embedding_basis]))
else:
X = feature_data
if normalize:
# X = (X - X.min(0)) / X.ptp(0)
X = (X - X.mean(0)) / X.std(0)
if method in ["hdbscan", "kmeans"]:
if method == "hdbscan":
key = "field_hdbscan"
hdbscan(adata, X_data=X, result_key=key, **kwargs)
elif method == "kmeans":
from sklearn.cluster import KMeans
key = "field_kmeans"
kmeans = KMeans(random_state=0, **kwargs).fit(X)
adata.obs[key] = kmeans.labels_.astype("str")
# clusters need to be categorical variables
            adata.obs[key] = adata.obs[key].astype("category")
elif method in ["louvain", "leiden", "infomap"]:
if X.shape[0] > 200000 and X.shape[1] > 2:
from pynndescent import NNDescent
nbrs = NNDescent(
X,
metric="euclidean",
n_neighbors=31,
n_jobs=cores,
random_state=19491001,
)
nbrs_idx, dist = nbrs.query(X, k=31)
else:
nbrs = NearestNeighbors(n_neighbors=31, n_jobs=cores).fit(X)
dist, nbrs_idx = nbrs.kneighbors(X)
row = np.repeat(nbrs_idx[:, 0], 30)
col = nbrs_idx[:, 1:].flatten()
graph = csr_matrix(
(np.repeat(1, len(col)), (row, col)),
shape=(adata.n_obs, adata.n_obs),
)
adata.obsp["vf_feature_knn"] = graph
if method == "leiden":
leiden(
adata,
adj_matrix_key="vf_feature_knn",
result_key="field_leiden",
)
elif method == "louvain":
louvain(
adata,
adj_matrix_key="vf_feature_knn",
result_key="field_louvain",
)
elif method == "infomap":
infomap(
adata,
adj_matrix_key="vf_feature_knn",
result_key="field_infomap",
)
logger.finish_progress(progress_name="clustering_field")
if copy:
return adata
return None
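# A hedged usage sketch for cluster_field: it assumes `adata` already carries
# velocities projected to the chosen basis, so the requested vector field
# features can be computed on demand.
#
#   cluster_field(adata, basis="umap", features=["speed", "divergence"],
#                 method="leiden")
#   adata.obs["field_leiden"]  # resulting cluster labels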
def streamline_clusters(
adata: AnnData,
basis: str = "umap",
features: list = ["speed", "divergence", "acceleration", "curvature", "curl"],
method: str = "sparsevfc",
xy_grid_nums: list = [50, 50],
density: float = 5,
curvature_method: int = 1,
feature_bins: int = 10,
clustering_method: str = "leiden",
assign_fixedpoints: bool = False,
reversed_fixedpoints: bool = False,
**kwargs,
):
"""
Parameters
----------
adata
basis
features
method
xy_grid_nums
density
curvature_method
feature_bins
clustering_method
Returns
-------
"""
import matplotlib.pyplot as plt
if method in ["louvain", "leiden"]:
try:
from cdlib import algorithms
"leiden" in dir(algorithms)
except ImportError:
raise ImportError(
"You need to install the excellent package `cdlib` if you want to use louvain or leiden "
"for clustering."
)
vf_dict, func = vecfld_from_adata(adata, basis=basis)
grid_kwargs_dict = {
"density": None,
"smooth": None,
"n_neighbors": None,
"min_mass": None,
"autoscale": False,
"adjust_for_stream": True,
"V_threshold": None,
}
if method.lower() == "sparsevfc":
X, V = adata.obsm["X_" + basis], adata.obsm["velocity_" + basis]
X_grid, p_mass, neighs, weight = prepare_velocity_grid_data(
X,
xy_grid_nums,
density=grid_kwargs_dict["density"],
smooth=grid_kwargs_dict["smooth"],
n_neighbors=grid_kwargs_dict["n_neighbors"],
)
for i in ["density", "smooth", "n_neighbors"]:
grid_kwargs_dict.pop(i)
V_emb = func(X)
V_grid = (V_emb[neighs] * weight[:, :, None]).sum(1) / np.maximum(1, p_mass)[:, None]
X_grid, V_grid = grid_velocity_filter(
V_emb=V,
neighs=neighs,
p_mass=p_mass,
X_grid=X_grid,
V_grid=V_grid,
**grid_kwargs_dict,
)
elif method.lower() == "gaussian":
X_grid, V_grid, D = velocity_on_grid(
vf_dict["X"],
vf_dict["Y"],
xy_grid_nums,
cut_off_velocity=True,
**grid_kwargs_dict,
)
else:
raise ValueError(f"only `sparsevfc` and `gaussian` method supported")
strm = plt.streamplot(
X_grid[0],
X_grid[1],
V_grid[0],
V_grid[1],
density=density,
)
    strm_res = strm.lines.get_segments()  # get streamline segments
# split segments into different streamlines
line_list_ori = {}
line_ind = 0
for i, seg in enumerate(strm_res):
if i == 0:
line_list_ori[0] = [seg]
else:
            # the second point of the previous segment should equal the first point of the current segment
if all(strm_res[i - 1][1] == seg[0]):
line_list_ori[line_ind].append(seg)
else:
line_ind += 1
line_list_ori[line_ind] = [seg]
line_list = line_list_ori.copy()
# convert to list of numpy arrays.
for key, values in line_list_ori.items():
line_list_ori[key] = np.array(values).reshape((-1, 2))
# remove duplicated rows from the numpy arrays.
for key, values in line_list.items():
line_list[key] = np.unique(np.array(values).reshape((-1, 2)), axis=0)
vector_field_class = SvcVectorField()
vector_field_class.from_adata(adata, basis=basis)
    has_acc = "acceleration" in features
    has_curv = "curvature" in features
    has_div = "divergence" in features
    has_speed = "speed" in features
    has_curl = "curl" in features
if has_acc:
acc_dict = {}
if has_curv:
cur_1_dict = {}
cur_2_dict = {}
if has_div:
div_dict = {}
if has_speed:
speed_dict = {}
if has_curl:
curl_dict = {}
    # save features along the streamline and create a histogram for each feature
bins = feature_bins # number of feature bins
line_len = []
feature_df = np.zeros((len(line_list), len(features) * bins))
for key, values in line_list.items():
line_len.append(values.shape[0])
tmp = None
if has_acc:
acceleration_val, acceleration_vec = vector_field_class.compute_acceleration(values)
acc_dict[key] = acceleration_val
_, acc_hist = np.histogram(acceleration_val, bins=(bins - 1), density=True)
if tmp is None:
tmp = acc_hist
if has_curv:
curvature_val_1 = vector_field_class.compute_curvature(values, formula=1)[0]
cur_1_dict[key] = curvature_val_1
curvature_val_2, curvature_vec = vector_field_class.compute_curvature(values)
cur_2_dict[key] = curvature_val_2
_, cur_1_hist = np.histogram(curvature_val_1, bins=(bins - 1), density=True)
_, cur_2_hist = np.histogram(curvature_val_2, bins=(bins - 1), density=True)
if tmp is None:
tmp = cur_1_hist if curvature_method == 1 else cur_2_hist
else:
tmp = np.hstack((tmp, cur_1_hist if curvature_method == 1 else cur_2_hist))
if has_div:
divergence_val = vector_field_class.compute_divergence(values)
div_dict[key] = divergence_val
_, div_hist = np.histogram(divergence_val, bins=(bins - 1), density=True)
if tmp is None:
tmp = div_hist
else:
tmp = np.hstack((tmp, div_hist))
if has_speed:
speed_vec = vector_field_class.func(values)
            speed_val = np.linalg.norm(speed_vec, axis=1)
speed_dict[key] = speed_val
_, speed_hist = np.histogram(speed_val, bins=(bins - 1), density=True)
if tmp is None:
tmp = speed_hist
else:
tmp = np.hstack((tmp, speed_hist))
if has_curl:
curl_val = vector_field_class.compute_curl(values)
curl_dict[key] = curl_val
_, curl_hist = np.histogram(curl_val, bins=(bins - 1), density=True)
if tmp is None:
tmp = curl_hist
else:
tmp = np.hstack((tmp, curl_hist))
feature_df[key, :] = tmp
# clustering
feature_adata = AnnData(feature_df)
pca_monocle(feature_adata, X_data=feature_df, pca_key="X_pca")
if clustering_method == "louvain":
louvain(feature_adata, obsm_key="X_pca")
elif clustering_method == "leiden":
leiden(feature_adata, obsm_key="X_pca")
elif clustering_method == "infomap":
infomap(feature_adata, obsm_key="X_pca")
elif method in ["hdbscan", "kmeans"]:
key = "field_hdbscan"
hdbscan(feature_adata, X_data=feature_df, result_key=key, **kwargs)
elif method == "kmeans":
from sklearn.cluster import KMeans
key = "field_kmeans"
kmeans = KMeans(random_state=0, **kwargs).fit(X)
feature_adata.obs[key] = kmeans.labels_.astype("str")
# clusters need to be categorical variables
feature_adata.obs[key] = adata.obs.obs[key].astype("category")
else:
raise ValueError(
"only louvain, leiden, infomap, hdbscan and kmeans clustering supported but your requested "
f"method is {method}"
)
if assign_fixedpoints or reversed_fixedpoints:
tmp = np.array(strm.lines.get_segments()).reshape((-1, 2))
vector_field_class.data["X"] = np.unique(tmp, axis=0)
if assign_fixedpoints:
(
X,
valid_fps_type_assignment,
assignment_id,
) = vector_field_class.assign_fixed_points(cores=1)
feature_adata.obs["fixed_point"] = -1
if reversed_fixedpoints:
# reverse vector field to identify source:
vector_field_class.func = lambda x: -vector_field_class.func(x)
(
X_rev,
valid_fps_type_assignment_rev,
assignment_id_rev,
) = vector_field_class.assign_fixed_points(cores=1)
feature_adata.obs["rev_fixed_point"] = -1
data_X = vector_field_class.data["X"]
for key, values in line_list.items():
indices = [np.where(np.logical_and(data_X[:, 0] == val[0], data_X[:, 1] == val[1]))[0][0] for val in values]
# assign fixed point to the most frequent point
if assign_fixedpoints:
mode_val = mode(assignment_id[indices])[0][0]
            if not np.isnan(mode_val):
                # assumed completion: record the dominant fixed-point id
                # reached from this streamline
                feature_adata.obs.loc[feature_adata.obs.index[key], "fixed_point"] = mode_val
        if reversed_fixedpoints:
            # assumed completion, mirroring the forward case to mark sources
            mode_val = mode(assignment_id_rev[indices])[0][0]
            if not np.isnan(mode_val):
                feature_adata.obs.loc[feature_adata.obs.index[key], "rev_fixed_point"] = mode_val
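# Similarly, a hedged sketch for streamline_clusters (same assumptions as
# above, plus a vector field reconstructed with SparseVFC for the basis):
#
#   streamline_clusters(adata, basis="umap", clustering_method="leiden",
#                       assign_fixedpoints=True)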
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numpy as np
import cvxpy as cp
from cvxpy.atoms.affine.vstack import vstack
from cvxpy.atoms.elementwise.power import power
from cvxpy.expressions.variable import Variable
from cvxpy.tests.base_test import BaseTest
class TestExpressions(BaseTest):
""" Unit tests for the expression/expression module. """
def setUp(self) -> None:
pass
# Test elementwise power
def test_power(self) -> None:
x = Variable(3)
y = Variable(3)
self.assertFalse(x.is_constant())
self.assertTrue(x.is_affine())
self.assertTrue(x.is_quadratic())
with warnings.catch_warnings():
warnings.simplefilter("ignore")
s = power(x.T @ y, 0)
self.assertTrue(s.is_constant())
self.assertTrue(s.is_affine())
self.assertTrue(s.is_quadratic())
t = power(x-y, 1)
self.assertFalse(t.is_constant())
self.assertTrue(t.is_affine())
self.assertTrue(t.is_quadratic())
u = power(x+2*y, 2)
self.assertFalse(u.is_constant())
self.assertFalse(u.is_affine())
self.assertTrue(u.is_quadratic())
self.assertTrue(u.is_dcp())
w = (x+2*y)**2
self.assertFalse(w.is_constant())
self.assertFalse(w.is_affine())
self.assertTrue(w.is_quadratic())
self.assertTrue(w.is_dcp())
def test_matrix_multiplication(self) -> None:
x = Variable((3, 5))
y = Variable((3, 5))
self.assertFalse(x.is_constant())
self.assertTrue(x.is_affine())
self.assertTrue(x.is_quadratic())
with warnings.catch_warnings():
warnings.simplefilter("ignore")
s = x.T @ y
self.assertFalse(s.is_constant())
self.assertFalse(s.is_affine())
self.assertTrue(s.is_quadratic())
self.assertFalse(s.is_dcp())
def test_quad_over_lin(self) -> None:
x = Variable((3, 5))
y = Variable((3, 5))
z = Variable()
s = cp.quad_over_lin(x-y, z)
self.assertFalse(s.is_constant())
self.assertFalse(s.is_affine())
self.assertFalse(s.is_quadratic())
self.assertTrue(s.is_dcp())
t = cp.quad_over_lin(x+2*y, 5)
self.assertFalse(t.is_constant())
self.assertFalse(t.is_affine())
self.assertTrue(t.is_quadratic())
self.assertTrue(t.is_dcp())
def test_matrix_frac(self) -> None:
x = Variable(5)
M = np.eye(5)
P = M.T @ M
s = cp.matrix_frac(x, P)
self.assertFalse(s.is_constant())
self.assertFalse(s.is_affine())
self.assertTrue(s.is_quadratic())
self.assertTrue(s.is_dcp())
def test_quadratic_form(self) -> None:
x = Variable(5)
        P = np.eye(5)
import collections
import fractions
import json
import os
import re
import warnings
import numpy as np # pip3 install numpy
import torch
from scipy import ndimage
import autodisc as ad
warnings.filterwarnings('ignore', '.*output shape of zoom.*') # suppress warning from snd.zoom()
ROUND = 10
EPS = 0.0001
class SphericPad(torch.nn.Module):
"""Pads spherically the input on all sides with the given padding size."""
def __init__(self, padding_size):
super(SphericPad, self).__init__()
if isinstance(padding_size, int):
self.pad_left = self.pad_right = self.pad_top = self.pad_bottom = padding_size
elif isinstance(padding_size, tuple) and len(padding_size) == 2:
self.pad_left = self.pad_right = padding_size[0]
self.pad_top = self.pad_bottom = padding_size[1]
elif isinstance(padding_size, tuple) and len(padding_size) == 4:
self.pad_left = padding_size[0]
self.pad_top = padding_size[1]
self.pad_right = padding_size[2]
self.pad_bottom = padding_size[3]
else:
raise ValueError('The padding size shoud be: int, tuple of size 2 or tuple of size 4')
def forward(self, input):
output = torch.cat([input, input[:, :, :self.pad_bottom, :]], dim=2)
output = torch.cat([output, output[:, :, :, :self.pad_right]], dim=3)
output = torch.cat([output[:, :, -(self.pad_bottom + self.pad_top):-self.pad_bottom, :], output], dim=2)
output = torch.cat([output[:, :, :, -(self.pad_right + self.pad_left):-self.pad_right], output], dim=3)
return output
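# Quick sanity check of the wrap-around padding: a (1, 1, 2, 2) input padded by
# 1 on every side becomes (1, 1, 4, 4), with each border row/column copied from
# the opposite edge of the input.
#
#   >>> pad = SphericPad(1)
#   >>> pad(torch.arange(4.).reshape(1, 1, 2, 2)).shape
#   torch.Size([1, 1, 4, 4])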
def rle2arr(st):
    '''
    Transforms an RLE string to a numpy array.
    Code from <NAME>.
    :param st: Description of the array in RLE format.
    :return: Numpy array.
    '''
rle_groups = re.findall("(\d*)([p-y]?[.boA-X$])", st.rstrip('!')) # [(2 yO)(1 $)(1 yO)]
code_list = sum([[c] * (1 if n == '' else int(n)) for n, c in rle_groups], []) # [yO yO $ yO]
code_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]
V = [[0 if c in ['.', 'b'] else 255 if c == 'o' else ord(c) - ord('A') + 1 if len(c) == 1 else (ord(c[0]) - ord(
'p')) * 24 + (ord(c[1]) - ord('A') + 25) for c in row if c != ''] for row in code_arr] # [[255 255] [255]]
maxlen = len(max(V, key=len))
A = np.array([row + [0] * (maxlen - len(row)) for row in V]) / 255 # [[1 1] [1 0]]
return A
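# Example: the RLE pattern "2o$2o!" (a 2x2 block of fully-on cells) decodes to
# a 2x2 array of ones.
#
#   >>> rle2arr("2o$2o!")
#   array([[1., 1.],
#          [1., 1.]])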
class Board:
def __init__(self, size=(10,10)):
self.params = {'R':10, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}
self.cells = np.zeros(size)
def clear(self):
self.cells.fill(0)
'''---------------------------------------------------------------
AUTOMATON PYTORCH VERSION
-------------------------------------------------------------------'''
def complex_mult_torch(X, Y):
""" Computes the complex multiplication in Pytorch when the tensor last dimension is 2: 0 is the real component and 1 the imaginary one"""
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
def roll_n(X, axis, n):
""" Rolls a tensor with a shift n on the specified axis"""
f_idx = tuple(slice(None, None, None) if i != axis else slice(0,n,None)
for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n,None,None)
for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front],axis)
class LeniaStepFFT(torch.nn.Module):
""" Module pytorch that computes one Lenia Step with the fft version"""
def __init__(self, R, b, kn, gn, m, s, T, is_soft_clip, is_gpu, size_y, size_x):
super(LeniaStepFFT, self).__init__()
self.R = R
self.T = T
self.dt = float (1.0 / T)
self.b = b
self.kn = kn
self.gn = gn
self.m = m
self.s = s
self.spheric_pad = SphericPad(int(self.R))
self.is_soft_clip = is_soft_clip
self.is_gpu = is_gpu
self.size_y = size_y
self.size_x = size_x
self.compute_kernel()
def compute_kernel(self):
size_y = self.size_y
size_x = self.size_x
# implementation of meshgrid in torch
x = torch.arange(size_x)
y = torch.arange(size_y)
xx = x.repeat(size_y, 1)
yy = y.view(-1,1).repeat(1, size_x)
X = (xx - int(size_x / 2)).float() / float(self.R)
Y = (yy - int(size_y / 2)).float() / float(self.R)
# distance to center in normalized space
D = torch.sqrt(X**2 + Y**2)
# kernel
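        # b holds the height of each of the k concentric kernel rings; kfunc
        # shapes the ring profile and (D<1) restricts the kernel to the unit disk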
k = len(self.b)
kr = k * D
bs = torch.tensor([float(f) for f in self.b])
b = bs[torch.min(torch.floor(kr).long(), (k-1)*torch.ones_like(kr).long())]
kfunc = AutomatonPytorch.kernel_core[self.kn - 1]
kernel = (D<1).float() * kfunc(torch.min(kr % 1, torch.ones_like(kr))) * b
kernel_sum = torch.sum(kernel)
# normalization of the kernel
self.kernel_norm = (kernel / kernel_sum).unsqueeze(0).unsqueeze(0)
# fft of the kernel
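        # note: torch.rfft/torch.irfft are the pre-1.8 PyTorch API (removed in
        # torch 1.8); on newer versions torch.fft.rfftn/irfftn would be needed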
self.kernel_FFT = torch.rfft(self.kernel_norm, signal_ndim=2, onesided=False)
self.kernel_updated = False
def forward(self, input):
if self.is_gpu:
input = input.cuda()
self.kernel_FFT = self.kernel_FFT.cuda()
self.world_FFT = torch.rfft(input, signal_ndim=2, onesided=False)
self.potential_FFT = complex_mult_torch(self.kernel_FFT, self.world_FFT)
self.potential = torch.irfft(self.potential_FFT, signal_ndim=2, onesided=False)
self.potential = roll_n(self.potential, 3, self.potential.size(3)//2)
self.potential = roll_n(self.potential, 2, self.potential.size(2)//2)
gfunc = AutomatonPytorch.field_func[min(self.gn,2)]
self.field = gfunc(self.potential, self.m, self.s)
if not self.is_soft_clip:
output_img = torch.clamp(input + self.dt * self.field, min=0., max=1.)
else:
output_img = AutomatonPytorch.soft_clip(input + self.dt * self.field, 0, 1, self.T)
return output_img
class LeniaStepConv2d(torch.nn.Module):
""" Module pytorch that computes one Lenia Step with the conv2d version"""
def __init__(self, R, b, kn, gn, m, s, T, is_soft_clip, is_gpu):
super(LeniaStepConv2d, self).__init__()
self.R = R
self.T = T
self.dt = float (1.0 / T)
self.b = b
self.kn = kn
self.gn = gn
self.m = m
self.s = s
self.spheric_pad = SphericPad(int(self.R))
self.is_soft_clip = is_soft_clip
self.is_gpu = is_gpu
self.compute_kernel()
def compute_kernel(self):
size_y = 2 * self.R + 1
size_x = 2 * self.R + 1
# implementation of meshgrid in torch
x = torch.arange(size_x)
y = torch.arange(size_y)
xx = x.repeat(size_y, 1)
yy = y.view(-1,1).repeat(1, size_x)
X = (xx - int(size_x / 2)).float() / float(self.R)
Y = (yy - int(size_y / 2)).float() / float(self.R)
# distance to center in normalized space
D = torch.sqrt(X**2 + Y**2)
# kernel
k = len(self.b)
kr = k * D
bs = torch.tensor([float(f) for f in self.b])
b = bs[torch.min(torch.floor(kr).long(), (k-1)*torch.ones_like(kr).long())]
kfunc = AutomatonPytorch.kernel_core[self.kn - 1]
kernel = (D<1).float() * kfunc(torch.min(kr % 1, torch.ones_like(kr))) * b
kernel_sum = torch.sum(kernel)
# normalization of the kernel
self.kernel_norm = (kernel / kernel_sum).unsqueeze(0).unsqueeze(0)
self.kernel_updated = False
def forward(self, input):
if self.is_gpu:
input = input.cuda()
self.kernel_norm = self.kernel_norm.cuda()
self.potential = torch.nn.functional.conv2d(self.spheric_pad(input), weight = self.kernel_norm)
gfunc = AutomatonPytorch.field_func[self.gn]
self.field = gfunc(self.potential, self.m, self.s)
if not self.is_soft_clip:
output_img = torch.clamp(input + self.dt * self.field, 0, 1) # A_new = A + dt * torch.clamp(D, -A/dt, (1-A)/dt)
else:
output_img = AutomatonPytorch.soft_clip(input + self.dt * self.field, 0, 1, self.T) # A_new = A + dt * Automaton.soft_clip(D, -A/dt, (1-A)/dt, 1)
return output_img
class AutomatonPytorch:
kernel_core = {
0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)
1: lambda r: torch.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)
2: lambda r, q=1/4: (r>=q).float() * (r<=1-q).float(), # step (stpz1/4)
3: lambda r, q=1/4: (r>=q).float() * (r<=1-q).float() + (r<q).float() *0.5 # staircase (life)
}
field_func = {
0: lambda n, m, s: torch.max(torch.zeros_like(n), 1 - (n-m)**2 / (9 * s**2) )**4 * 2 - 1, # polynomial (quad4)
1: lambda n, m, s: torch.exp( - (n-m)**2 / (2 * s**2) ) * 2 - 1, # exponential / gaussian (gaus)
2: lambda n, m, s: (torch.abs(n-m)<=s).float() * 2 - 1 # step (stpz)
}
@staticmethod
def soft_max(x, m, k):
return torch.log(torch.exp(k*x) + torch.exp(k*m)) / k
@staticmethod
def soft_clip(x, min, max, k):
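        # smooth approximation of clamp(x, min, max): a log-sum-exp soft
        # maximum with `min` (soft floor) followed by a soft minimum with
        # `max` (soft ceiling), with sharpness controlled by k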
a = torch.exp(k*x)
b = torch.exp(k*min)
c = torch.exp(-k*max)
return torch.log( 1/(a+b)+c ) / -k
def __init__(self, world, version = 'fft', use_gpu = True):
self.world = world
#self.world_FFT = np.zeros(world.cells.shape)
#self.potential_FFT = np.zeros(world.cells.shape)
#self.potential = np.zeros(world.cells.shape)
#self.field = np.zeros(world.cells.shape)
#self.field_old = None
#self.change = np.zeros(world.cells.shape)
self.X = None
self.Y = None
self.D = None
#self.gen = 0
#self.time = 0
self.is_multi_step = False
self.is_soft_clip = False
self.is_inverted = False
self.kn = 1
self.gn = 1
# look if gpu is available
if use_gpu and torch.cuda.is_available():
self.is_gpu = True
else:
self.is_gpu = False
# initialization of the pytorch model to perform one step in Lenia
if version == 'fft':
self.model = LeniaStepFFT(self.world.params['R'], self.world.params['b'], (self.world.params.get('kn') or self.kn), (self.world.params.get('gn') or self.gn), self.world.params['m'], self.world.params['s'], self.world.params['T'], self.is_soft_clip, self.is_gpu, self.world.cells.shape[0], self.world.cells.shape[1])
elif version == 'conv2d':
self.model = LeniaStepConv2d(self.world.params['R'], self.world.params['b'], (self.world.params.get('kn') or self.kn), (self.world.params.get('gn') or self.gn), self.world.params['m'], self.world.params['s'], self.world.params['T'], self.is_soft_clip, self.is_gpu)
else:
raise ValueError('Lenia pytorch automaton step calculation can be done with fft or conv 2d')
if self.is_gpu:
self.model = self.model.cuda()
def calc_once(self):
A = torch.from_numpy(self.world.cells).unsqueeze(0).unsqueeze(0).float()
A_new = self.model(A)
#A = A[0,0,:,:].cpu().numpy()
A_new = A_new[0,0,:,:].cpu().numpy()
#self.change = (A_new - A) / (1/self.world.params['T'])
self.world.cells = A_new
#self.gen += 1
#self.time = round(self.time + (1/self.world.params['T']), ROUND)
def reset(self):
pass
#self.gen = 0
#self.time = 0
#self.field_old = None
class Lenia(ad.core.System):
@staticmethod
def default_config():
def_config = ad.core.System.default_config()
def_config.version = 'pytorch_fft' # reikna_fft, pytorch_fft, pytorch_conv2d
def_config.use_gpu = True
return def_config
@staticmethod
def default_system_parameters():
def_params = ad.core.System.default_system_parameters()
def_params.size_y = 100
def_params.size_x = 100
def_params.R = 13
def_params.T = 10
def_params.b = [1]
def_params.m = 0.15
def_params.s = 0.017
def_params.kn = 1
def_params.gn = 1
def_params.init_state = np.zeros((def_params.size_y, def_params.size_x))
return def_params
def default_statistics(self):
def_stats = super().default_statistics()
def_stats.append(LeniaStatistics(self))
return def_stats
def __init__(self, statistics=None, system_parameters=None, config=None, **kwargs):
super().__init__(statistics=statistics, system_parameters=system_parameters, config=config, **kwargs)
self.run_parameters = None
self.world = None
self.automaton = None
def init_run(self, run_parameters=None):
if run_parameters is None:
self.run_parameters = self.system_parameters
else:
self.run_parameters = {**self.system_parameters, **run_parameters}
self.world = Board((self.run_parameters['size_y'], self.run_parameters['size_x']))
self.world.cells = self.run_parameters["init_state"]
self.world.params = self.run_parameters
if np.min(self.world.cells) < 0 or np.max(self.world.cells) > 1:
            warnings.warn('The given initial state has values below 0 and/or above 1. It will be clipped to the range [0, 1].')
self.world.cells = np.clip(self.world.cells, 0, 1)
if self.config.version.lower() == 'pytorch_fft':
self.automaton = AutomatonPytorch(self.world, version='fft', use_gpu = self.config.use_gpu)
elif self.config.version.lower() == 'pytorch_conv2d':
self.automaton = AutomatonPytorch(self.world, version='conv2d', use_gpu = self.config.use_gpu)
else:
raise ValueError('Unknown lenia version (config.version = {!r})'.format(self.config.version))
return self.world.cells
def step(self, step_idx):
self.automaton.calc_once()
        # for some invalid parameters the cells become nan,
        # which causes problems when computing the statistics;
        # therefore, treat all nan cells as 0
self.world.cells[np.isnan(self.world.cells)] = 0
return self.world.cells
def stop(self):
pass
class LeniaStatistics(ad.core.SystemStatistic):
'''Default statistics for the lenia system.'''
DISTANCE_WEIGHT = 2 # 1=linear, 2=quadratic, ...
@staticmethod
def calc_statistic_diff(statistic_names, stat1, stat2, nan_value_diff=1.0, nan_nan_diff=0.0):
if isinstance(stat1, list) or isinstance(stat2, list):
            raise NotImplementedError('Difference between statistics given as lists is not implemented!')
if not isinstance(statistic_names, list):
statistic_names = [statistic_names]
stat1 = [stat1]
stat2 = [stat2]
        # assume the default elementwise difference for all statistics
        diff = np.array(stat1) - np.array(stat2)
        # check if there are angle statistics and calculate their difference appropriately
statistic_names_ndarray = np.array(statistic_names)
angle_statistics_inds = (statistic_names_ndarray == 'activation_center_movement_angle') \
| (statistic_names_ndarray == 'activation_center_movement_angle_mean') \
| (statistic_names_ndarray == 'positive_growth_center_movement_angle') \
| (statistic_names_ndarray == 'positive_growth_center_movement_angle_mean')
for angle_stat_idx in np.where(angle_statistics_inds)[0]:
diff[angle_stat_idx] = ad.helper.misc.angle_difference_degree(stat1[angle_stat_idx], stat2[angle_stat_idx])
# if both statistics are nan, then the difference is nan_nan_diff (default=0)
diff[np.isnan(stat1) & np.isnan(stat2)] = nan_nan_diff
# if one statistic is nan, then the current diff is nan, then use nan_value_diff
diff[np.isnan(diff)] = nan_value_diff
return diff
@staticmethod
def calc_goalspace_distance(points1, points2, config, goal_space_extent=None):
# normalize representations
if goal_space_extent is not None:
points1 = points1 - goal_space_extent[:,0]
points1 = points1 / (goal_space_extent[:,1] - goal_space_extent[:,0])
points2 = points2 - goal_space_extent[:,0]
points2 = points2 / (goal_space_extent[:,1] - goal_space_extent[:,0])
diff = LeniaStatistics.calc_statistic_diff(config.statistics, points1, points2)
if len(diff) == 0:
dist = np.array([])
elif np.ndim(diff) == 1:
dist = np.linalg.norm(diff)
else:
dist = np.linalg.norm(diff, axis=1)
return dist
def __init__(self, system):
super().__init__(system)
# statistics
self.data['is_dead'] = []
self.data['activation_mass'] = []
self.data['activation_mass_mean'] = []
self.data['activation_mass_std'] = []
self.data['activation_volume'] = []
self.data['activation_volume_mean'] = []
self.data['activation_volume_std'] = []
self.data['activation_density'] = []
self.data['activation_density_mean'] = []
self.data['activation_density_std'] = []
self.data['activation_center_position'] = []
self.data['activation_center_velocity'] = []
self.data['activation_center_velocity_mean'] = []
self.data['activation_center_velocity_std'] = []
self.data['activation_center_movement_angle'] = []
self.data['activation_center_movement_angle_mean'] = []
self.data['activation_center_movement_angle_std'] = []
self.data['activation_center_movement_angle_velocity'] = []
self.data['activation_center_movement_angle_velocity_mean'] = []
self.data['activation_center_movement_angle_velocity_std'] = []
self.data['activation_mass_asymmetry'] = []
self.data['activation_mass_asymmetry_mean'] = []
self.data['activation_mass_asymmetry_std'] = []
self.data['activation_mass_distribution'] = []
self.data['activation_mass_distribution_mean'] = []
self.data['activation_mass_distribution_std'] = []
self.data['activation_hu1'] = []
self.data['activation_hu1_mean'] = []
self.data['activation_hu1_std'] = []
self.data['activation_hu2'] = []
self.data['activation_hu2_mean'] = []
self.data['activation_hu2_std'] = []
self.data['activation_hu3'] = []
self.data['activation_hu3_mean'] = []
self.data['activation_hu3_std'] = []
self.data['activation_hu4'] = []
self.data['activation_hu4_mean'] = []
self.data['activation_hu4_std'] = []
self.data['activation_hu5'] = []
self.data['activation_hu5_mean'] = []
self.data['activation_hu5_std'] = []
self.data['activation_hu6'] = []
self.data['activation_hu6_mean'] = []
self.data['activation_hu6_std'] = []
self.data['activation_hu7'] = []
self.data['activation_hu7_mean'] = []
self.data['activation_hu7_std'] = []
self.data['activation_hu8'] = []
self.data['activation_hu8_mean'] = []
self.data['activation_hu8_std'] = []
self.data['activation_flusser9'] = []
self.data['activation_flusser9_mean'] = []
self.data['activation_flusser9_std'] = []
self.data['activation_flusser10'] = []
self.data['activation_flusser10_mean'] = []
self.data['activation_flusser10_std'] = []
self.data['activation_flusser11'] = []
self.data['activation_flusser11_mean'] = []
self.data['activation_flusser11_std'] = []
self.data['activation_flusser12'] = []
self.data['activation_flusser12_mean'] = []
self.data['activation_flusser12_std'] = []
self.data['activation_flusser13'] = []
self.data['activation_flusser13_mean'] = []
self.data['activation_flusser13_std'] = []
self.data['positive_growth_mass'] = []
self.data['positive_growth_mass_mean'] = []
self.data['positive_growth_mass_std'] = []
self.data['positive_growth_volume'] = []
self.data['positive_growth_volume_mean'] = []
self.data['positive_growth_volume_std'] = []
self.data['positive_growth_density'] = []
self.data['positive_growth_density_mean'] = []
self.data['positive_growth_density_std'] = []
self.data['positive_growth_center_position'] = []
self.data['positive_growth_center_velocity'] = []
self.data['positive_growth_center_velocity_mean'] = []
self.data['positive_growth_center_velocity_std'] = []
self.data['positive_growth_center_movement_angle'] = []
self.data['positive_growth_center_movement_angle_mean'] = []
self.data['positive_growth_center_movement_angle_std'] = []
self.data['positive_growth_center_movement_angle_velocity'] = []
self.data['positive_growth_center_movement_angle_velocity_mean'] = []
self.data['positive_growth_center_movement_angle_velocity_std'] = []
self.data['activation_positive_growth_centroid_distance'] = []
self.data['activation_positive_growth_centroid_distance_mean'] = []
self.data['activation_positive_growth_centroid_distance_std'] = []
# other
self.distance_weight_matrix = LeniaStatistics.calc_distance_matrix(system.system_parameters.size_y,
system.system_parameters.size_x)
self.angles_from_middle = None
def reset(self):
        # reset all statistics to empty lists (note: dict.fromkeys(self.data, [])
        # would share one list object across every key)
        self.data = {key: [] for key in self.data}
def calc_after_run(self, system, all_obs):
'''Calculates the final statistics for lenia observations after a run is completed'''
self.reset()
num_of_obs = len(all_obs)
activation_mass_data = np.ones(num_of_obs) * np.nan
activation_volume_data = np.ones(num_of_obs) * np.nan
activation_density_data = np.ones(num_of_obs) * np.nan
activation_center_position_data = np.ones((num_of_obs, 2)) * np.nan
activation_center_velocity_data = np.ones(num_of_obs) * np.nan
activation_center_movement_angle_data = np.ones(num_of_obs) * np.nan
activation_center_movement_angle_velocity_data = np.ones(num_of_obs) * np.nan
activation_mass_asymmetry_data = np.ones(num_of_obs) * np.nan
activation_mass_distribution_data = np.ones(num_of_obs) * np.nan
activation_hu1_data = np.ones(num_of_obs) * np.nan
activation_hu2_data = np.ones(num_of_obs) * np.nan
activation_hu3_data = np.ones(num_of_obs) * np.nan
activation_hu4_data = np.ones(num_of_obs) * np.nan
activation_hu5_data = np.ones(num_of_obs) * np.nan
activation_hu6_data = np.ones(num_of_obs) * np.nan
activation_hu7_data = np.ones(num_of_obs) * np.nan
activation_hu8_data = np.ones(num_of_obs) * np.nan
activation_flusser9_data = np.ones(num_of_obs) * np.nan
activation_flusser10_data = np.ones(num_of_obs) * np.nan
activation_flusser11_data = np.ones(num_of_obs) * np.nan
activation_flusser12_data = np.ones(num_of_obs) * np.nan
activation_flusser13_data = np.ones(num_of_obs) * np.nan
positive_growth_mass_data = np.ones(num_of_obs) * np.nan
positive_growth_volume_data = np.ones(num_of_obs) * np.nan
positive_growth_density_data = np.ones(num_of_obs) * np.nan
positive_growth_center_position_data = np.ones((num_of_obs, 2)) * np.nan
positive_growth_center_velocity_data = np.ones(num_of_obs) * np.nan
positive_growth_center_movement_angle_data = np.ones(num_of_obs) * np.nan
positive_growth_center_movement_angle_velocity_data = np.ones(num_of_obs) * np.nan
activation_positive_growth_centroid_distance_data = np.ones(num_of_obs) * np.nan
# positive_growth_data = np.ones(num_of_obs) * np.nan
# positive_growth_volume_data = np.ones(num_of_obs) * np.nan
# positive_growth_density_data = np.ones(num_of_obs) * np.nan
size_y = all_obs[0].shape[0]
size_x = all_obs[0].shape[1]
num_of_cells = size_y * size_x
        # calc initial center of mass and use it as a reference point to "center" the world around it;
        # in consecutive steps, recalculate the center of mass and "recenter" the world around it
#mid_y = int((size_y-1) / 2)
#mid_x = int((size_x-1) / 2)
mid_y = (size_y - 1) / 2
mid_x = (size_x - 1) / 2
mid = np.array([mid_y, mid_x])
# prepare the angles of the vectors from the middle point for each point in the env, used to compute the mass asymmetry
# only recompute for first calculation of statistics (self.angles_from_middle is None) or if the observation size changed
if self.angles_from_middle is None or self.angles_from_middle.shape[0] != size_y or self.angles_from_middle.shape[1] != size_x:
self.angles_from_middle = np.ones((size_y,size_x))*np.nan
for y in range(size_y):
for x in range(size_x):
vec = [mid_y-y, x-mid_x]
self.angles_from_middle[y][x] = ad.helper.misc.angle_of_vec_degree([vec[1], vec[0]])
activation_center_of_mass = np.array(LeniaStatistics.center_of_mass(all_obs[0]))
activation_shift_to_center = mid - activation_center_of_mass
init_growth = all_obs[1] - all_obs[0]
positive_growth_center_of_mass = np.array(LeniaStatistics.center_of_mass(init_growth))
positive_growth_shift_to_center = mid - positive_growth_center_of_mass
prev_activation_center_movement_angle = np.nan
prev_positive_growth_center_movement_angle = np.nan
uncentered_activation_center_position = np.array([np.nan, np.nan])
for step in range(len(all_obs)):
activation = all_obs[step]
# uncentered_activation_center_position = np.array(ndimage.measurements.center_of_mass(activation))
#
# # set center to middle if it can not be calculated, for example if all cells are dead
# if np.isnan(uncentered_activation_center_position[0]) or np.isnan(uncentered_activation_center_position[1]) or \
# uncentered_activation_center_position[0] == float('inf') or uncentered_activation_center_position[1] == float('inf'):
# uncentered_activation_center_position = mid.copy()
# shift the system to the last calculated center of mass so that it is in the middle
# the matrix can only be shifted in discrete values, therefore the shift is transformed to integer
centered_activation = np.roll(activation, activation_shift_to_center.astype(int), (0, 1))
# calculate the image moments
activation_moments = ad.helper.statistics.calc_image_moments(centered_activation)
# new center of mass
activation_center_of_mass = np.array([activation_moments.y_avg, activation_moments.x_avg])
# calculate the change of center as a vector
activation_shift_from_prev_center = mid - activation_center_of_mass
# calculate the new shift to center the next obs to the new center
activation_shift_to_center = activation_shift_to_center + activation_shift_from_prev_center
# transform the new center, encoded as a shift from the first image, back into the original image coordinates
uncentered_activation_center_position[0] = (mid_y - activation_shift_to_center[0]) % size_y
uncentered_activation_center_position[1] = (mid_x - activation_shift_to_center[1]) % size_x
activation_center_position_data[step] = uncentered_activation_center_position
# activation mass
activation_mass = activation_moments.m00
            activation_mass_data[step] = activation_mass / num_of_cells  # normalize the total activation mass by the number of cells
# activation volume
activation_volume = np.sum(activation > EPS)
activation_volume_data[step] = activation_volume / num_of_cells
# activation density
if activation_volume == 0:
activation_density_data[step] = 0
else:
activation_density_data[step] = activation_mass/activation_volume
# activation moments
activation_hu1_data[step] = activation_moments.hu1
activation_hu2_data[step] = activation_moments.hu2
activation_hu3_data[step] = activation_moments.hu3
activation_hu4_data[step] = activation_moments.hu4
activation_hu5_data[step] = activation_moments.hu5
activation_hu6_data[step] = activation_moments.hu6
activation_hu7_data[step] = activation_moments.hu7
activation_hu8_data[step] = activation_moments.hu8
activation_flusser9_data[step] = activation_moments.flusser9
activation_flusser10_data[step] = activation_moments.flusser10
activation_flusser11_data[step] = activation_moments.flusser11
activation_flusser12_data[step] = activation_moments.flusser12
activation_flusser13_data[step] = activation_moments.flusser13
# get velocity and angle of movement
# distance between the previous center of mass and the new one is the velocity
# angle is computed based on the shift vector
if step <= 0:
activation_center_velocity = np.nan
activation_center_movement_angle = np.nan
activation_center_movement_angle_velocity = np.nan
activation_mass_asymmetry = np.nan
else:
activation_center_velocity = np.linalg.norm(activation_shift_from_prev_center)
if activation_center_velocity == 0:
activation_center_movement_angle = np.nan
else:
activation_center_movement_angle = ad.helper.misc.angle_of_vec_degree([-1 * activation_shift_from_prev_center[1], activation_shift_from_prev_center[0]])
# Angular velocity, is the difference between the current and previous angle of movement
if activation_center_movement_angle is np.nan or prev_activation_center_movement_angle is np.nan:
activation_center_movement_angle_velocity = 0
else:
activation_center_movement_angle_velocity = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, prev_activation_center_movement_angle)
# activation mass asymmetry
# calculate the angle between the center shift and the angle from the center to each point.
# if the angle is < 180 the point is on the right side of the movement
# then use its mass
activation_right_side_mass = 0
if np.isnan(activation_center_movement_angle):
activation_mass_asymmetry = np.nan
else:
                    if activation_mass == 0 or activation_mass == num_of_cells:
                        # if all cells are dead or all fully active, the mass is distributed perfectly symmetrically
                        activation_mass_asymmetry = 0
else:
# for y in range(size_y):
# for x in range(size_x):
# angle_dist = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, angles_from_middle[y][x])
#
# if angle_dist < 180:
# activation_right_side_mass = activation_right_side_mass + activation[y][x]
angle_dist = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, self.angles_from_middle)
activation_right_side_mass = np.sum(activation[angle_dist < 0])
# activation_mass_asymmetry = right_mass - left_mass = right_mass - (mass - right_mass) = 2*right_mass - mass
activation_mass_asymmetry = (2 * activation_right_side_mass - activation_mass) / activation_mass
prev_activation_center_movement_angle = activation_center_movement_angle
activation_center_velocity_data[step] = activation_center_velocity
activation_center_movement_angle_data[step] = activation_center_movement_angle
activation_center_movement_angle_velocity_data[step] = activation_center_movement_angle_velocity
activation_mass_asymmetry_data[step] = activation_mass_asymmetry
# mass distribution around the center
if activation_mass <= EPS:
activation_mass_distribution = 1.0
else:
activation_mass_distribution = np.sum(self.distance_weight_matrix * centered_activation) / np.sum(centered_activation)
activation_mass_distribution_data[step] = activation_mass_distribution
##########################################################################################################################################
# positive growth statistics
uncentered_positive_growth_center_position = np.array([np.nan, np.nan])
if step <= 0:
positive_growth_mass_data[step] = np.nan
positive_growth_volume_data[step] = np.nan
positive_growth_density_data[step] = np.nan
positive_growth_center_position_data[step] = [np.nan, np.nan]
positive_growth_center_velocity_data[step] = np.nan
positive_growth_center_movement_angle_data[step] = np.nan
positive_growth_center_movement_angle_velocity_data[step] = np.nan
else:
positive_growth = np.clip(all_obs[step] - all_obs[step - 1], 0, 1)
# uncentered_positive_growth_center_position = np.array(StatLenia.center_of_mass(positive_growth))
#
# # set center to middle if it can not be calculated, for example if all cells are dead
# if np.isnan(uncentered_positive_growth_center_position[0]) or np.isnan(uncentered_positive_growth_center_position[1]) or \
# uncentered_positive_growth_center_position[0] == float('inf') or uncentered_positive_growth_center_position[1] == float('inf'):
# uncentered_positive_growth_center_position = mid.copy()
#
# positive_growth_center_position_data[step] = uncentered_positive_growth_center_position
# shift the system to the last calculated center of mass so that it is in the middle
# the matrix can only be shifted in discrete values, therefore the shift is transformed to integer
centered_positive_growth = np.roll(positive_growth, [int(positive_growth_shift_to_center[0]), int(positive_growth_shift_to_center[1])], (0, 1))
# new center of mass
positive_growth_center_of_mass = np.array(LeniaStatistics.center_of_mass(centered_positive_growth))
# calculate the change of center as a vector
positive_growth_shift_from_prev_center = mid - positive_growth_center_of_mass
# calculate the new shift to center the next obs to the new center
positive_growth_shift_to_center = positive_growth_shift_to_center + positive_growth_shift_from_prev_center
# transform the new center, encoded as a shift from the first image, back into the original image coordinates
uncentered_positive_growth_center_position[0] = (mid_y - positive_growth_shift_to_center[0]) % size_y
uncentered_positive_growth_center_position[1] = (mid_x - positive_growth_shift_to_center[1]) % size_x
positive_growth_center_position_data[step] = uncentered_positive_growth_center_position
# growth mass
positive_growth_mass = np.sum(centered_positive_growth)
            positive_growth_mass_data[step] = positive_growth_mass / num_of_cells  # growth mass normalized by the number of cells
            # positive growth volume
            positive_growth_volume = np.sum(centered_positive_growth > EPS)
            positive_growth_volume_data[step] = positive_growth_volume / num_of_cells
            # positive growth density
if positive_growth_volume == 0:
positive_growth_density_data[step] = 0
else:
positive_growth_density_data[step] = positive_growth_mass / positive_growth_volume
# get velocity and angle of movement
# distance between the previous center of mass and the new one is the velocity
# angle is computed based on the shift vector
if step <= 1:
positive_growth_center_velocity = np.nan
positive_growth_center_movement_angle = np.nan
positive_growth_center_movement_angle_velocity = np.nan
else:
positive_growth_center_velocity = np.linalg.norm(positive_growth_shift_from_prev_center)
if positive_growth_center_velocity == 0:
positive_growth_center_movement_angle = np.nan
else:
positive_growth_center_movement_angle = ad.helper.misc.angle_of_vec_degree([-1 * positive_growth_shift_from_prev_center[1], positive_growth_shift_from_prev_center[0]])
                # Angular velocity is the difference between the current and the previous angle of movement
if positive_growth_center_movement_angle is np.nan or prev_positive_growth_center_movement_angle is np.nan:
positive_growth_center_movement_angle_velocity = 0
else:
positive_growth_center_movement_angle_velocity = ad.helper.misc.angle_difference_degree(positive_growth_center_movement_angle, prev_positive_growth_center_movement_angle)
prev_positive_growth_center_movement_angle = positive_growth_center_movement_angle
positive_growth_center_velocity_data[step] = positive_growth_center_velocity
positive_growth_center_movement_angle_data[step] = positive_growth_center_movement_angle
positive_growth_center_movement_angle_velocity_data[step] = positive_growth_center_movement_angle_velocity
######################################################################################################################
# Growth - Activation centroid distance
if step <= 0:
activation_positive_growth_centroid_distance_data[step] = np.nan
else:
activation_positive_growth_centroid_distance = ad.helper.misc.get_min_distance_on_repeating_2d_array((size_y, size_x), uncentered_activation_center_position, uncentered_positive_growth_center_position)
activation_positive_growth_centroid_distance_data[step] = activation_positive_growth_centroid_distance
is_dead = np.all(all_obs[-1] == 1) or np.all(all_obs[-1] == 0)
self.data['is_dead'] = is_dead
self.data['activation_mass'] = activation_mass_data
self.data['activation_mass_mean'] = np.nanmean(activation_mass_data)
self.data['activation_mass_std'] = np.nanstd(activation_mass_data)
self.data['activation_volume'] = activation_volume_data
self.data['activation_volume_mean'] = np.nanmean(activation_volume_data)
self.data['activation_volume_std'] = np.nanstd(activation_volume_data)
self.data['activation_density'] = activation_density_data
self.data['activation_density_mean'] = np.nanmean(activation_density_data)
self.data['activation_density_std'] = np.nanstd(activation_density_data)
self.data['activation_center_position'] = activation_center_position_data
self.data['activation_center_velocity'] = activation_center_velocity_data
self.data['activation_center_velocity_mean'] = np.nanmean(activation_center_velocity_data)
self.data['activation_center_velocity_std'] = np.nanstd(activation_center_velocity_data)
self.data['activation_center_movement_angle'] = activation_center_movement_angle_data
self.data['activation_center_movement_angle_mean'] = ad.helper.statistics.nan_mean_over_angles_degrees(activation_center_movement_angle_data)
#self.data['activation_center_movement_angle_std'] = np.nanstd(activation_center_movement_angle_data)
self.data['activation_center_movement_angle_velocity'] = activation_center_movement_angle_velocity_data
self.data['activation_center_movement_angle_velocity_mean'] = np.nanmean(activation_center_movement_angle_velocity_data)
        self.data['activation_center_movement_angle_velocity_std'] = np.nanstd(activation_center_movement_angle_velocity_data)
from typing import Set, Tuple, Dict, List, Any
import h5py
import numpy as np
from itertools import product as combvec
import torch
from torch.utils.data import DataLoader, Dataset
def load_dataset(path: str, parameter_list: Set[str]) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
"""
Loads a dataset from disk.
Args:
path: The path to the hdf5 file.
        parameter_list: A set with the key names of the parameters in the dataset.
    Returns:
        A tuple (images, data): the image array and a dictionary of numpy arrays with the parameter values.
"""
data = {}
with h5py.File(path, 'r') as h5f:
images = h5f['images'][:].astype('float32')
for parameter_name in parameter_list:
data[parameter_name] = h5f['parameter/' + parameter_name][:].astype('float32')
return images, data
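# Example usage (hypothetical file name and parameter keys):
# images, params = load_dataset('scans.h5', {'detuning', 'power'})
# print(images.shape, params['detuning'].shape)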
def crop_images(images: np.ndarray, crop_size: int) -> np.ndarray:
"""
    Crops square images around the image center.
    Args:
        images: an n x m x m array, where n is the number of images and m is the image size
        crop_size: the side length of the square crop
    Returns:
        The image array with shape n x crop_size x crop_size
"""
image_size = images.shape[1]
center = image_size/2
crop_padding = crop_size/2
crop_start_pixel = int(center-crop_padding)
crop_end_pixel = int(center+crop_padding)
return images[:, crop_start_pixel:crop_end_pixel, crop_start_pixel:crop_end_pixel]
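# Worked example: for 56x56 images and crop_size=32, center = 28 and
# crop_padding = 16, so pixels 12:44 are kept along each axis.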
def prepare_images_tensorflow(images: np.ndarray) -> np.ndarray:
"""
    Prepares an image ndarray to fit the TensorFlow NHWC layout.
    Args:
        images: The images in an n x m x m format
    Returns:
        The images in the NHWC TensorFlow format (n x m x m x 1).
"""
return images.reshape(*images.shape, 1)
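# Example: images of shape (n, 56, 56) become (n, 56, 56, 1).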
def normalize_complete_images(images: np.ndarray) -> np.ndarray:
"""
Normalize the given images with respect to all images.
Args:
        images: The images in an NHW format.
Returns:
The normalized images in a NHW format.
"""
min_pv = np.min(images)
# shift pixel values to a minimum of 0
images = images - min_pv
# new maximum of the images
max_pv = np.max(images)
# the images values are set to 0-1
return images / max_pv
def normalize_single_images(images: np.ndarray) -> np.ndarray:
"""
Normalize each image individually to a value range between 0 and 1
Args:
images: The images in a NHW format
Returns:
The normalized images in a NHW format.
"""
for idx in range(len(images)):
images[idx] = normalize_complete_images(images[idx])
return images
def create_combination_index(parameter: Dict[str, np.ndarray], uniques: List[str]) -> Tuple[np.ndarray, np.ndarray]:
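    """
    Builds pairwise image indices: for every unique combination of the
    values of the `uniques` parameters, all (x, y) index pairs within the
    matching group of entries are enumerated.
    Args:
        parameter: All parameters.
        uniques: The parameter names whose unique value combinations define the groups.
    Returns:
        (x_index, y_index): integer index arrays of equal length.
    """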
# the x index of the image
x_index = np.empty(0, dtype=int)
# the y index of the image
y_index = np.empty(0, dtype=int)
unique_iterator, keys = _multi_uniques(parameter, uniques)
number_of_entries = len(parameter[keys[0]])
for unique_point in unique_iterator:
number_of_parameter = len(unique_point)
selection_map = np.arange(number_of_entries)
for idx in range(number_of_parameter):
selection_map = np.intersect1d(selection_map, np.where(parameter[keys[idx]] == unique_point[idx])[0])
for idx in selection_map:
for idy in selection_map:
x_index = np.append(x_index, idx)
y_index = np.append(y_index, idy)
return x_index, y_index
def _multi_uniques(params: Dict[str, np.ndarray], uniques: List[str]) -> Tuple[combvec, List[str]]:
"""
    Creates an iterator over all combinations of the unique values of the
    given parameters.
    Args:
        params: All parameters
        uniques: The parameters to be uniquely paired
Returns:
(iterator, parameter key list)
"""
unique_values = {}
keys = []
for val in uniques:
unique_values[val] = np.unique(params[val])
keys.append(val)
iterator = combvec(*unique_values.values())
return iterator, keys
## For influence functions' notebooks
# generate dataset (it transforms numpy arrays to torch tensors)
class NumpyToPyTorch_DataLoader(Dataset):
"""Face Landmarks dataset."""
def __init__(self, X, Y, transform=None):
"""
Args:
            X: numpy array of images
            Y: numpy array of labels
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.X = torch.from_numpy(X).float() # image
# self.Y = torch.from_numpy(Y).float() # label for regression
self.Y = torch.from_numpy(Y).long() # label for classification
# i, j = self.Y.size()[0], self.Y.size()[1]
# self.Y = self.Y.view(i, 1, j)
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self, index):
label = self.Y[index]
img = self.X[index]
if self.transform:
img = self.transform(img)
return img, label
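# Example (hypothetical arrays; Y must hold integer class labels):
# ds = NumpyToPyTorch_DataLoader(np.zeros((10, 1, 28, 28)), np.zeros(10, dtype=int))
# img, label = ds[0]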
class Downloader(object):
def __init__(self, data_name, batch):
self.data_name = data_name
self.batch = batch
# Follow information in README.md to download these datasets and save them to folder 'data'
        if self.data_name == 'single_cut_rephased':
self.training_path = './data/validation_single_cut_rephased.h5'
self.testing_path = './data/single_cut_rephased.h5'
self.labels_path = './influence_functions/data_and_masks/validation_single_cut_theoretical_labels.npy'
self.test_labels_path = './influence_functions/data_and_masks/single_cut_theoretical_labels.npy'
self.training_mask_path = './influence_functions/data_and_masks/validation_single_cut_training_mask.npy'
self.validation_mask_path = './influence_functions/data_and_masks/validation_single_cut_validation_mask.npy'
self.testing_mask_path = './influence_functions/data_and_masks/single_cut_test_mask.npy'
self.training_mean = 0.46388384212120864
self.training_std = 0.17022241233110608
        if self.data_name == 'single_cut_with_micromotion':
self.training_path = './data/validation_single_cut_56.h5'
self.testing_path = './data/single_cut_56.h5'
self.test_labels_path = './influence_functions/data_and_masks/single_cut_theoretical_labels.npy'
self.labels_path = './influence_functions/data_and_masks/validation_single_cut_theoretical_labels.npy'
self.training_mask_path = './influence_functions/data_and_masks/validation_single_cut_training_mask.npy'
self.validation_mask_path = './influence_functions/data_and_masks/validation_single_cut_validation_mask.npy'
self.testing_mask_path = './influence_functions/data_and_masks/single_cut_test_mask.npy'
self.training_mean = 0.07900989899839451
self.training_std = 0.044015503630440524
        if self.data_name == 'phase_diagram_rephased':
self.training_path = './data/phase_diagram_rephased.h5'
self.testing_path = './data/phase_diagram_rephased.h5'
self.labels_path = './influence_functions/data_and_masks/phase_diagram_anomalydetected_labels.npy'
self.test_labels_path = './influence_functions/data_and_masks/phase_diagram_anomalydetected_labels.npy'
self.training_mask_path = './influence_functions/data_and_masks/phase_diagram_training_mask.npy'
self.validation_mask_path = './influence_functions/data_and_masks/phase_diagram_validation_mask.npy'
self.testing_mask_path = './influence_functions/data_and_masks/phase_diagram_test_mask.npy'
self.training_mean = 0.47376012469840956
self.training_std = 0.16998128006793728
def train_loader(self, batch_size = None, shuffle = False):
if batch_size is None:
batch_size = self.batch
exp_data = h5py.File(self.training_path, 'r')
data = exp_data['images']
labels = np.load(self.labels_path)
data_size = data.shape[0]
# Load the training mask
mask = np.load(self.training_mask_path)
data = np.array(data) # h5py does not support fancy indexing, like masks
self.training_data = data[mask]
self.train_samples_no = self.training_data.shape[0]
# Zerocenter normalization (with training data's mean and std)
# To avoid the leak of data
self.training_data = (self.training_data - self.training_mean) / self.training_std
self.training_labels = labels[mask]
train_set = NumpyToPyTorch_DataLoader(self.training_data, self.training_labels)
train_loader = DataLoader(train_set,
batch_size = batch_size,
shuffle = False,
num_workers = 1,
pin_memory = True # CUDA only, this lets your DataLoader allocate the samples in page-locked memory, which speeds-up the transfer from CPU to GPU during training
)
return train_loader#, mask - it's always the same, thanks to the fixed random seed, so I needed to save it only once
def test_loader(self, batch_size = None):
if batch_size is None:
batch_size = self.batch
exp_data = h5py.File(self.testing_path, 'r')
data = exp_data['images']
labels = np.load(self.test_labels_path)
data_size = data.shape[0]
# Load the test mask
mask = np.load(self.testing_mask_path)
data = np.array(data) # h5py does not support fancy indexing, like masks
self.test_data = data[mask]
self.test_labels = labels[mask]
self.test_samples_no = self.test_data.shape[0]
# Zerocenter normalization (with training data's mean and std)
# To avoid the leak of data
self.test_data = (self.test_data - self.training_mean) / self.training_std
test_set = NumpyToPyTorch_DataLoader(self.test_data, self.test_labels)
test_loader = DataLoader(test_set,
batch_size = batch_size,
shuffle = False,
num_workers = 1,
pin_memory=True # CUDA only
)
return test_loader
def validation_loader(self, batch_size = None):
if batch_size is None:
batch_size = self.batch
exp_data = h5py.File(self.training_path, 'r')
data = exp_data['images']
labels = np.load(self.labels_path)
data_size = data.shape[0]
# Load the validation mask
mask = np.load(self.validation_mask_path)
        data = np.array(data)  # h5py does not support fancy indexing, like masks
"""
Central module for calculating the tidal amplitudes, phases, etc.
"""
import numpy as np
from ._time_conversion import _normalize_time
from .confidence import _confidence
from .constituent_selection import ut_cnstitsel
from .diagnostics import _PE, _SNR, ut_diagn
from .ellipse_params import ut_cs2cep
from .harmonics import ut_E
from .robustfit import robustfit
from .utilities import Bunch
default_opts = {
"constit": "auto",
"order_constit": None,
"conf_int": "linear",
"method": "ols",
"trend": True,
"phase": "Greenwich",
"nodal": True,
"infer": None,
"MC_n": 200,
"Rayleigh_min": 1,
"robust_kw": {"weight_function": "cauchy"},
"white": False,
"verbose": True,
"epoch": None,
}
def _process_opts(opts, is_2D):
newopts = Bunch(default_opts)
newopts.update_values(strict=True, **opts)
# TODO: add more validations.
newopts.infer = validate_infer(newopts.infer, is_2D)
snr = newopts.conf_int != "none"
newopts.order_constit = validate_order_constit(newopts.order_constit, snr)
compat_opts = _translate_opts(newopts)
return compat_opts
def _translate_opts(opts):
# Temporary shim between new-style options and Matlab heritage.
# Here or elsewhere, proper validation remains to be added.
oldopts = Bunch()
oldopts.cnstit = opts.constit
oldopts.ordercnstit = opts.order_constit
oldopts.infer = opts.infer # we will not use the matlab names, though
oldopts.conf_int = True
if opts.conf_int == "linear":
oldopts.linci = True
elif opts.conf_int == "MC":
oldopts.linci = False
elif opts.conf_int == "none":
oldopts.conf_int = False
oldopts.nodiagn = 1
else:
raise ValueError("'conf_int' must be 'linear', 'MC', or 'none'")
oldopts.notrend = not opts.trend
oldopts["nodesatlint"] = False
oldopts["nodesatnone"] = False
oldopts["gwchlint"] = False
oldopts["gwchnone"] = False
if opts.nodal == "linear_time":
oldopts["nodsatlint"] = True
elif not opts.nodal:
oldopts["nodsatnone"] = True
if opts.phase == "linear_time":
oldopts["gwchlint"] = True
elif opts.phase == "raw":
oldopts["gwchnone"] = True
    # Otherwise it should be the default, 'Greenwich'.
oldopts.rmin = opts.Rayleigh_min
oldopts.white = opts.white
oldopts.newopts = opts # So we can access new opts via the single "opt."
oldopts["RunTimeDisp"] = opts.verbose
oldopts.epoch = opts.epoch
return oldopts
def validate_infer(infer, is_2D):
if infer is None or infer == "none":
return None
required_keys = {"inferred_names", "reference_names", "amp_ratios", "phase_offsets"}
keys = set(infer.keys())
    if not required_keys <= keys:
raise ValueError("infer option must include %s" % required_keys)
nI = len(infer.inferred_names)
if len(infer.reference_names) != nI:
raise ValueError("inferred_names must be same" " length as reference_names")
nratios = 2 * nI if is_2D else nI
if len(infer.amp_ratios) != nratios or len(infer.phase_offsets) != nratios:
raise ValueError("ratios and offsets need to have length %d" % nratios)
if "approximate" not in infer:
infer.approximate = False
return infer
def validate_order_constit(arg, have_snr):
available = ["PE", "frequency"]
if have_snr:
available.append("SNR")
if arg is None:
return "PE"
if isinstance(arg, str) and arg in available:
return arg
if not isinstance(arg, str) and np.iterable(arg):
return arg # TODO: add checking of its elements
raise ValueError(
f"order_constit must be one of {available} or"
f" a sequence of constituents, not '{arg}'",
)
def solve(t, u, v=None, lat=None, **opts):
"""
Calculate amplitude, phase, confidence intervals of tidal constituents.
Parameters
----------
t : array_like
Time in days since `epoch`, or np.datetime64 array, or pandas datetime array.
u : array_like
Sea-surface height, velocity component, etc.
v : {None, array_like}, optional
If `u` is a velocity component, `v` is the orthogonal component.
lat : float, required
Latitude in degrees.
epoch : {string, `datetime.date`, `datetime.datetime`}, if datenum is provided in t.
Default `None` if `t` is `datetime`, `np.datetime64`, or `pd.datetime array.`
Optional valid strings are
- 'python' : if `t` is days since '0000-12-31'
- 'matlab' : if `t` is days since '0000-00-00'
Or, an arbitrary date in the form 'YYYY-MM-DD'.
constit : {'auto', sequence}, optional
List of strings with standard letter abbreviations of
tidal constituents; or 'auto' to let the list be determined
based on the time span.
conf_int : {'linear', 'MC', 'none'}, optional
If not 'none' (string), calculate linearized confidence
intervals, or use a Monte-Carlo simulation.
method : {'ols', 'robust'}, optional
Solve with ordinary least squares, or with a robust algorithm.
trend : bool, optional
True (default) to include a linear trend in the model.
phase : {'Greenwich', 'linear_time', 'raw'}, optional
Give Greenwich-referenced phase lags, an approximation
using linearized times, or raw lags.
nodal : {True, False, 'linear_time'}, optional
True (default) to include nodal/satellite corrections;
'linear_time' to use the linearized time approximation;
False to omit nodal corrections.
Returns
-------
coef : Bunch
Data container with all configuration and solution information:
Other Parameters
----------------
infer : {None, dict or Bunch}, optional; default is None.
If not None, the items are:
**inferred_names** : {sequence of N strings}
inferred constituent names
**reference_names** : {sequence of N strings}
reference constituent names
**amp_ratios** : {sequence, N or 2N floats}
amplitude ratios (unitless)
**phase_offsets** : {sequence, N or 2N floats}
phase offsets (degrees)
**approximate** : {bool, optional (default is False)}
use approximate method
amp_ratios and phase_offsets have length N for a scalar
time series, or 2N for a vector series.
order_constit : {'PE', 'SNR', 'frequency', sequence}, optional
The default is 'PE' (percent energy) order, returning results ordered from
high energy to low.
The 'SNR' order is from high signal-to-noise ratio to low, and is
available only if `conf_int` is not 'none'. The
'frequency' order is from low to high frequency. Alternatively, a
sequence of constituent names may be supplied, typically the same list as
given in the *constit* option.
MC_n : integer, optional
Not yet implemented.
robust_kw : dict, optional
Keyword arguments for `robustfit`, if `method` is 'robust'.
Rayleigh_min : float
Minimum conventional Rayleigh criterion for automatic
constituent selection; default is 1.
white : bool
If False (default), use band-averaged spectra from the
residuals in the confidence limit estimates; if True,
assume a white background spectrum.
verbose : {True, False}, optional
True (default) turns on verbose output. False emits no messages.
Note
----
`utide.reconstruct` requires the calculation of confidence intervals.
Notes
-----
To be added: much additional explanation.
There will also be more "Other Parameters".
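    Examples
    --------
    A minimal sketch (hypothetical synthetic series; the latitude, epoch,
    and signal below are illustrative assumptions, not values from this
    module)::

        import numpy as np
        t = np.arange(0, 60, 1 / 24)            # 60 days of hourly samples, in days
        u = np.sin(2 * np.pi * t / 0.5175)      # crude M2-like signal (12.42 h period)
        coef = solve(t, u, lat=45.0, epoch='2020-01-01')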
"""
compat_opts = _process_opts(opts, v is not None)
coef = _solv1(t, u, v, lat, **compat_opts)
return coef
def _solv1(tin, uin, vin, lat, **opts):
# The following returns a possibly modified copy of tin (ndarray).
# t, u, v are fully edited ndarrays (unless v is None).
packed = _slvinit(tin, uin, vin, lat, **opts)
tin, t, u, v, tref, lor, elor, opt = packed
nt = len(t)
if opt["RunTimeDisp"]:
print("solve: ", end="")
# opt['cnstit'] = cnstit
cnstit, coef = ut_cnstitsel(
tref,
opt["rmin"] / (24 * lor),
opt["cnstit"],
opt["infer"],
)
# a function we don't need
# coef.aux.rundescr = ut_rundescr(opt,nNR,nR,nI,t,tgd,uvgd,lat)
coef.aux.opt = opt
coef.aux.lat = lat
if opt["RunTimeDisp"]:
print("matrix prep ... ", end="")
ngflgs = [opt["nodsatlint"], opt["nodsatnone"], opt["gwchlint"], opt["gwchnone"]]
E_args = (lat, ngflgs, opt.prefilt)
# Make the model array, starting with the harmonics.
E = ut_E(t, tref, cnstit.NR.frq, cnstit.NR.lind, *E_args)
# Positive and negative frequencies
B = np.hstack((E, E.conj()))
if opt.infer is not None:
Etilp = np.empty((nt, coef.nR), dtype=complex)
Etilm = np.empty((nt, coef.nR), dtype=complex)
if not opt.infer.approximate:
for k, ref in enumerate(cnstit.R):
E = ut_E(t, tref, ref.frq, ref.lind, *E_args)
# (nt,1)
Q = ut_E(t, tref, ref.I.frq, ref.I.lind, *E_args) / E
# (nt,ni)
Qsum_p = (Q * ref.I.Rp).sum(axis=1)
Etilp[:, k] = E[:, 0] * (1 + Qsum_p)
Qsum_m = (Q * np.conj(ref.I.Rm)).sum(axis=1)
Etilm[:, k] = E[:, 0] * (1 + Qsum_m)
else:
# Approximate inference.
Q = np.empty((coef.nR,), dtype=float)
beta = np.empty((coef.nR,), dtype=float)
for k, ref in enumerate(cnstit.R):
E = ut_E(t, tref, ref.frq, ref.lind, *E_args)[:, 0]
Etilp[:, k] = E
Etilm[:, k] = E
num = ut_E(tref, tref, ref.I.frq, ref.I.lind, *E_args).real
den = ut_E(tref, tref, ref.frq, ref.lind, *E_args).real
Q[k] = (num / den)[0, 0]
arg = np.pi * lor * 24 * (ref.I.frq - ref.frq) * (nt + 1) / nt
beta[k] = np.sin(arg) / arg
B = np.hstack((B, Etilp, np.conj(Etilm)))
# add the mean
B = np.hstack((B, np.ones((nt, 1))))
if not opt["notrend"]:
B = np.hstack((B, ((t - tref) / lor)[:, np.newaxis]))
# nm = B.shape[1] # 2*(nNR + nR) + 1, plus 1 if trend is included.
if opt["RunTimeDisp"]:
print("solution ... ", end="")
if opt["twodim"]:
xraw = u + 1j * v
else:
xraw = u
if opt.newopts.method == "ols":
# Model coefficients.
try:
m = np.linalg.lstsq(B, xraw, rcond=None)[0]
except TypeError:
m = np.linalg.lstsq(B, xraw)[0]
W = np.ones(nt) # Uniform weighting; we could use a scalar 1, or None.
else:
rf = robustfit(B, xraw, **opt.newopts.robust_kw)
m = rf.b
W = rf.w
coef.rf = rf
coef.weights = W
xmod = np.dot(B, m) # Model fit.
if not opt["twodim"]:
xmod = np.real(xmod)
e = W * (xraw - xmod) # Weighted residuals.
nI, nR, nNR = coef.nI, coef.nR, coef.nNR
ap = np.hstack((m[:nNR], m[2 * nNR : 2 * nNR + nR]))
i0 = 2 * nNR + nR
am = np.hstack((m[nNR : 2 * nNR], m[i0 : i0 + nR]))
Xu = np.real(ap + am)
Yu = -np.imag(ap - am)
if not opt["twodim"]:
coef["A"], _, _, coef["g"] = ut_cs2cep(Xu, Yu)
Xv = []
Yv = []
else:
Xv = np.imag(ap + am)
Yv = np.real(ap - am)
packed = ut_cs2cep(Xu, Yu, Xv, Yv)
coef["Lsmaj"], coef["Lsmin"], coef["theta"], coef["g"] = packed
# Mean and trend.
if opt["twodim"]:
if opt["notrend"]:
coef["umean"] = np.real(m[-1])
coef["vmean"] = np.imag(m[-1])
else:
coef["umean"] = np.real(m[-2])
coef["vmean"] = np.imag(m[-2])
coef["uslope"] = np.real(m[-1]) / lor
coef["vslope"] = np.imag(m[-1]) / lor
else:
if opt["notrend"]:
coef["mean"] = np.real(m[-1])
else:
coef["mean"] = np.real(m[-2])
coef["slope"] = np.real(m[-1]) / lor
if opt.infer:
# complex coefficients
apI = np.empty((nI,), dtype=complex)
amI = np.empty((nI,), dtype=complex)
ind = 0
for k, ref in enumerate(cnstit.R):
apI[ind : ind + ref.nI] = ref.I.Rp * ap[nNR + k]
amI[ind : ind + ref.nI] = ref.I.Rm * am[nNR + k]
ind += ref.nI
XuI = (apI + amI).real
YuI = -(apI - amI).imag
if not opt.twodim:
A, _, _, g = ut_cs2cep(XuI, YuI)
coef.A = np.hstack((coef.A, A))
coef.g = np.hstack((coef.g, g))
else:
XvI = (apI + amI).imag
YvI = (apI - amI).real
Lsmaj, Lsmin, theta, g = ut_cs2cep(XuI, YuI, XvI, YvI)
coef.Lsmaj = np.hstack((coef.Lsmaj, Lsmaj))
coef.Lsmin = np.hstack((coef.Lsmin, Lsmin))
coef.theta = np.hstack((coef.theta, theta))
coef.g = np.hstack((coef.g, g))
if opt["conf_int"]:
coef = _confidence(
coef,
cnstit,
opt,
t,
e,
tin,
elor,
xraw,
xmod,
W,
m,
B,
Xu,
Yu,
Xv,
Yv,
)
# Diagnostics.
if not opt["nodiagn"]:
coef = ut_diagn(coef)
# Adds a diagn dictionary, always sorted by energy.
# This doesn't seem very useful. Let's directly add the variables
# to the base coef structure. Then they can be sorted with everything
# else.
coef["PE"] = _PE(coef)
coef["SNR"] = _SNR(coef)
# Re-order constituents.
coef = _reorder(coef, opt)
# This might have added PE if it was not already present.
if opt["RunTimeDisp"]:
print("done.")
return coef
def _reorder(coef, opt):
if opt["ordercnstit"] == "PE":
# Default: order by decreasing energy.
if "PE" not in coef:
coef["PE"] = _PE(coef)
ind = coef["PE"].argsort()[::-1]
elif opt["ordercnstit"] == "frequency":
ind = coef["aux"]["frq"].argsort()
elif opt["ordercnstit"] == "SNR":
# If we are here, we should be guaranteed to have SNR already.
ind = coef["SNR"].argsort()[::-1]
else:
namelist = list(coef["name"])
ilist = [namelist.index(name) for name in opt["ordercnstit"]]
ind = np.array(ilist, dtype=int)
arrays = "name PE SNR A A_ci g g_ci Lsmaj Lsmaj_ci Lsmin Lsmin_ci theta theta_ci"
reorderlist = [a for a in arrays.split() if a in coef]
for key in reorderlist:
coef[key] = coef[key][ind]
coef["aux"]["frq"] = coef["aux"]["frq"][ind]
coef["aux"]["lind"] = coef["aux"]["lind"][ind]
return coef
def _slvinit(tin, uin, vin, lat, **opts):
if lat is None:
raise ValueError("Latitude must be supplied")
# Supporting only 1-D arrays for now; we can add "group"
# support later.
if tin.shape != uin.shape or tin.ndim != 1 or uin.ndim != 1:
raise ValueError("t and u must be 1-D arrays")
if vin is not None and vin.shape != uin.shape:
raise ValueError("v must have the same shape as u")
opt = Bunch(twodim=(vin is not None))
# Step 0: apply epoch to time.
tin = _normalize_time(tin, opts["epoch"])
# Step 1: remove invalid times from tin, uin, vin
tin = np.ma.masked_invalid(tin)
uin = np.ma.masked_invalid(uin)
if vin is not None:
vin = np.ma.masked_invalid(vin)
if np.ma.is_masked(tin):
goodmask = ~np.ma.getmaskarray(tin)
uin = uin.compress(goodmask)
if vin is not None:
vin = vin.compress(goodmask)
tin = tin.compressed() # No longer masked.
# Step 2: generate t, u, v from edited tin, uin, vin.
v = None
if np.ma.is_masked(uin) or np.ma.is_masked(vin):
mask = np.ma.getmaskarray(uin)
if vin is not None:
            mask = np.ma.mask_or(mask, np.ma.getmaskarray(vin))
#!/usr/bin/env python
"""
Interpolation of scattered data using ordinary kriging/collocation
The program uses nearest neighbors interpolation and selects data from eight
quadrants around the prediction point and uses a third-order Gauss-Markov
covariance model, with a correlation length defined by the user.
Provides the possibility of pre-cleaning of the data using a spatial n-sigma
filter before interpolation.
Observations with provided noise/error estimates (for each observation) are
added to the diagonal of the covariance matrix if provided. User can also
provide a constant rms-noise added to the diagonal.
Takes as input a h5df file with needed data in geographical coordinates
and a-priori error if needed. The user provides the wanted projection
using the EPSG projection format.
Output consists of an hdf5 file containing the predictions, rmse and the
number of points used in the prediction, and the epsg number for the
projection.
Notes:
    If both the a-priori errors and a constant rms are provided, all error
    values smaller than the provided rms are set to this value, giving a
    minimum error for the observations.
To reduce the impact of highly correlated along-track measurements
(seen as streaks in the interpolated raster) the 'rand' option
can be used. This randomly samples N-observations in each quadrant
instead of using the closest data points.
Example:
python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
-c 50 10 -v lon lat dhdt dummy -e 0.1 -m dist
python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
-c 50 10 -v lon lat dhdt rmse -e 0.1 -m rand
Credits:
captoolkit - JPL Cryosphere Altimetry Processing Toolkit
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
Jet Propulsion Laboratory, California Institute of Technology
"""
import h5py
import pyproj
import argparse
import numpy as np
from scipy import stats
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
def rand(x, n):
"""Draws random samples from array"""
# Determine data density
if len(x) > n:
# Draw random samples from array
I = np.random.choice(np.arange(len(x)), n, replace=False)
else:
# Output boolean vector - true
I = np.ones(len(x), dtype=bool)
return I
def sort_dist(d, n):
""" Sort array by distance"""
# Determine if sorting needed
if len(d) >= n:
        # Keep the indices of the n closest points
        I = np.argsort(d)[:n]
else:
# Output boolean vector - true
        I = np.ones(len(d), dtype=bool)
return I
def transform_coord(proj1, proj2, x, y):
"""Transform coordinates from proj1 to proj2 (EPSG num)."""
# Set full EPSG projection strings
proj1 = pyproj.Proj("+init=EPSG:" + proj1)
proj2 = pyproj.Proj("+init=EPSG:" + proj2)
# Convert coordinates
return pyproj.transform(proj1, proj2, x, y)
def make_grid(xmin, xmax, ymin, ymax, dx, dy):
""" Construct output grid-coordinates. """
Nn = int((np.abs(ymax - ymin)) / dy) + 1 # ny
Ne = int((np.abs(xmax - xmin)) / dx) + 1 # nx
xi = np.linspace(xmin, xmax, num=Ne)
yi = np.linspace(ymin, ymax, num=Nn)
return np.meshgrid(xi, yi)
def spatial_filter(x, y, z, dx, dy, sigma=5.0):
""" Cleaning of spatial data """
# Grid dimensions
Nn = int((np.abs(y.max() - y.min())) / dy) + 1
Ne = int((np.abs(x.max() - x.min())) / dx) + 1
# Bin data
f_bin = stats.binned_statistic_2d(x, y, z, bins=(Ne, Nn))
# Get bin numbers for the data
index = f_bin.binnumber
# Unique indexes
ind = np.unique(index)
# Create output
zo = z.copy()
# Number of unique index
for i in range(len(ind)):
# index for each bin
idx, = np.where(index == ind[i])
# Get data
zb = z[idx]
# Make sure we have enough
if len(zb[~np.isnan(zb)]) == 0:
continue
        # Deviation from the bin median
dh = zb - np.nanmedian(zb)
# Identify outliers
foo = np.abs(dh) > sigma * np.nanstd(dh)
# Set to nan-value
zb[foo] = np.nan
# Replace data
zo[idx] = zb
# Return filtered array
return zo
# Description of algorithm
des = 'Interpolation of scattered data using ordinary kriging/collocation'
# Define command-line arguments
parser = argparse.ArgumentParser(description=des)
parser.add_argument(
'ifile', metavar='ifile', type=str, nargs='+',
help='name of input file (h5-format)')
parser.add_argument(
'ofile', metavar='ofile', type=str, nargs='+',
help='name of ouput file (h5-format)')
parser.add_argument(
'-b', metavar=('w', 'e', 's', 'n'), dest='bbox', type=float, nargs=4,
help=('bounding box for geograph. region (deg or m), optional'),
default=[None], )
parser.add_argument(
'-d', metavar=('dx', 'dy'), dest='dxy', type=float, nargs=2,
help=('spatial resolution for grid (deg or km)'),
default=[1, 1], )
parser.add_argument(
'-n', metavar='nobs', dest='nobs', type=int, nargs=1,
help=('number of obs. for each quadrant'),
default=[None], )
parser.add_argument(
'-r', metavar='radius', dest='radius', type=float, nargs=1,
    help=('cutoff distance (km)'),
default=[None], )
parser.add_argument(
'-a', metavar='alpha', dest='alpha', type=float, nargs=1,
help=('correlation length (km)'),
default=[None], )
parser.add_argument(
'-p', metavar=('epsg_num'), dest='proj', type=str, nargs=1,
help=('EPSG proj number (AnIS=3031, GrIS=3413)'),
default=['3031'], )
parser.add_argument(
'-c', metavar=('dim', 'thres'), dest='filter', type=float, nargs=2,
help=('dim. of filter in km and sigma thres'),
default=[0, 0], )
parser.add_argument(
'-v', metavar=('x', 'y', 'z', 's'), dest='vnames', type=str, nargs=4,
    help=('names of variables in the HDF5-file'),
default=['lon', 'lat', 'h_cor', 'h_rms'], )
parser.add_argument(
'-e', metavar='sigma', dest='sigma', type=float, nargs=1,
help=('constant rms noise value'),
default=[0], )
parser.add_argument(
'-m', metavar=None, dest='mode', type=str, nargs=1,
help=('sampling mode: random (rand) or distance (dist).'),
choices=('rand', 'dist'), default=['dist'], )
# Parser argument to variable
args = parser.parse_args()
# Read input from terminal
ifile = args.ifile[0]
ofile = args.ofile[0]
bbox = args.bbox
dx = args.dxy[0] * 1e3
dy = args.dxy[1] * 1e3
proj = args.proj[0]
nobs = args.nobs[0]
dmax = args.radius[0] * 1e3
alpha = args.alpha[0] * 1e3
sigma = args.sigma[0]
dxy = args.filter[0] * 1e3
thres = args.filter[1]
mode = args.mode[0]
vicol = args.vnames[:]
# Print parameters to screen
print('parameters:')
for p in list(vars(args).items()): print(p)
# Get variable names
xvar, yvar, zvar, svar = vicol
# Load all 1d variables needed
with h5py.File(ifile, 'r') as fi:
# Get variables
lon = fi[xvar][:]
lat = fi[yvar][:]
zp = fi[zvar][:]
sp = fi[svar][:] if svar in fi else np.ones(lon.shape)
# Remove data with NaN's
lon, lat, zp, sp = lon[~np.isnan(zp)], lat[~np.isnan(zp)], \
zp[~np.isnan(zp)], sp[~np.isnan(zp)]
# Transform coordinates to wanted projection
xp, yp = transform_coord('4326', proj, lon, lat)
# Test for different types of input
if bbox[0] is not None:
# Extract bounding box elements
xmin, xmax, ymin, ymax = bbox
else:
# Create bounding box limits
xmin, xmax, ymin, ymax = (xp.min() - 50. * dx), (xp.max() + 50. * dx), \
(yp.min() - 50. * dy), (yp.max() + 50. * dy)
# Construct the grid
Xi, Yi = make_grid(xmin, xmax, ymin, ymax, dx, dy)
# Flatten prediction grid
xi = Xi.ravel()
yi = Yi.ravel()
# Markov-model parameter
a = 0.9132 * alpha
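# The third-order Gauss-Markov covariance used below is
#   C(d) = c0 * (1 + d/a - 0.5 * (d/a)**2) * exp(-d/a);
# the 0.9132 factor presumably rescales the user-supplied correlation
# length alpha to this model's parameter a.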
# Signal variance of entire field
c0 = np.nanvar(zp)
# Compute noise variance
crms = sigma * sigma
# Output vectors
zi = np.ones(len(xi)) * np.nan
ei = np.ones(len(xi)) * np.nan
ni = np.ones(len(xi)) * np.nan
# Determine nobs for tree
if mode == 'rand':
n_quad = 16
else:
n_quad = 8
# Check if we should filter
if dxy != 0:
print('-> cleaning data ...')
# Clean the data in the spatial domain
zp = spatial_filter(xp.copy(), yp.copy(), zp.copy(), dxy, dxy, sigma=thres)
# Remove data with NaN's
xp, yp, zp, sp = xp[~np.isnan(zp)], yp[~np.isnan(zp)], zp[~np.isnan(zp)], \
sp[~np.isnan(zp)]
print("-> creating KDTree ...")
# Construct cKDTree
TreeP = cKDTree(np.c_[xp, yp])
# Enter prediction loop
for i in range(len(xi)):
# Find closest observations
(dr, idx) = TreeP.query((xi[i], yi[i]), nobs * n_quad)
# Test if closest point to far away
if (np.min(dr) > dmax) or (len(zp[idx]) < 2): continue
# Parameters inside cap
x = xp[idx]
y = yp[idx]
z = zp[idx]
s = sp[idx]
# Noise handling
if np.all(sp == 1):
# Provide all obs. with the same RMS
c = np.ones(len(x)) * crms
else:
# Set all obs. errors < crms to crms
c = s ** 2
c[c < crms] = crms
# Compute angle to data points
theta = (180.0 / np.pi) * np.arctan2(y - yi[i], x - xi[i]) + 180
# Get index for data in 8-sectors
IQ1 = (theta > 0) & (theta < 45)
IQ2 = (theta > 45) & (theta < 90)
IQ3 = (theta > 90) & (theta < 135)
IQ4 = (theta > 135) & (theta < 180)
IQ5 = (theta > 180) & (theta < 225)
IQ6 = (theta > 225) & (theta < 270)
IQ7 = (theta > 270) & (theta < 315)
IQ8 = (theta > 315) & (theta < 360)
# Merge all data to sectors
Q1 = np.vstack((x[IQ1], y[IQ1], z[IQ1], c[IQ1], dr[IQ1])).T
Q2 = np.vstack((x[IQ2], y[IQ2], z[IQ2], c[IQ2], dr[IQ2])).T
Q3 = np.vstack((x[IQ3], y[IQ3], z[IQ3], c[IQ3], dr[IQ3])).T
Q4 = np.vstack((x[IQ4], y[IQ4], z[IQ4], c[IQ4], dr[IQ4])).T
Q5 = np.vstack((x[IQ5], y[IQ5], z[IQ5], c[IQ5], dr[IQ5])).T
Q6 = np.vstack((x[IQ6], y[IQ6], z[IQ6], c[IQ6], dr[IQ6])).T
Q7 = np.vstack((x[IQ7], y[IQ7], z[IQ7], c[IQ7], dr[IQ7])).T
Q8 = np.vstack((x[IQ8], y[IQ8], z[IQ8], c[IQ8], dr[IQ8])).T
# Sampling strategy
if mode == 'rand':
# Draw random samples from each sector
I1 = rand(Q1[:, 0], nobs)
I2 = rand(Q2[:, 0], nobs)
I3 = rand(Q3[:, 0], nobs)
I4 = rand(Q4[:, 0], nobs)
I5 = rand(Q5[:, 0], nobs)
I6 = rand(Q6[:, 0], nobs)
I7 = rand(Q7[:, 0], nobs)
I8 = rand(Q8[:, 0], nobs)
else:
# Draw closest samples from each sector
        I1 = sort_dist(Q1[:, 4], nobs)
        I2 = sort_dist(Q2[:, 4], nobs)
        I3 = sort_dist(Q3[:, 4], nobs)
        I4 = sort_dist(Q4[:, 4], nobs)
        I5 = sort_dist(Q5[:, 4], nobs)
        I6 = sort_dist(Q6[:, 4], nobs)
        I7 = sort_dist(Q7[:, 4], nobs)
        I8 = sort_dist(Q8[:, 4], nobs)
# Stack the data
Q18 = np.vstack((Q1[I1, :], Q2[I2, :], Q3[I3, :], Q4[I4, :], Q5[I5, :], \
Q6[I6, :], Q7[I7, :], Q8[I8, :]))
# Extract position and data
xc = Q18[:, 0]
yc = Q18[:, 1]
zc = Q18[:, 2]
cc = Q18[:, 3]
# Distance from grid node to data
Dxy = Q18[:, 4]
    # Estimate the local median (robust estimate of the local mean)
    m0 = np.nanmedian(zc)
# Covariance function for Dxy
Cxy = c0 * (1 + (Dxy / a) - 0.5 * (Dxy / a) ** 2) * np.exp(-Dxy / a)
# Compute pair-wise distance
Dxx = cdist(list(zip(xc, yc)), list(zip(xc, yc)), "euclidean")
# Covariance function Dxx
Cxx = c0 * (1 + (Dxx / a) - 0.5 * (Dxx / a) ** 2) * np.exp(-Dxx / a)
# Measurement noise matrix
    N = np.eye(len(Cxx)) * np.diag(cc)
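    # Ordinary collocation prediction (sketch, assumed from the zi/ei/ni
    # outputs declared above; solve for the weights, predict, and store
    # the formal error and the number of points used):
    CxyC = np.dot(Cxy, np.linalg.inv(Cxx + N))
    zi[i] = np.dot(CxyC, zc - m0) + m0
    ei[i] = np.sqrt(np.abs(c0 - np.dot(CxyC, Cxy.T)))
    ni[i] = len(zc)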
import logging
import numpy as np
import scipy.integrate
class ZNDSolver(object):
"""Solver for steady solution"""
def __init__(self, config, reaction_rate):
self._config = config
self._reaction_rate = reaction_rate
self._max_lamda = 1.0 - self._config.lambda_tol
self._logger = logging.getLogger(__name__)
self._compute_parameters()
def compute(self, grid):
self._logger.info('Starting ZND structure computations')
assert grid[0] < 0.0, 'Left boundary should be negative'
#assert grid[-1] == 0.0, 'Right boundary should be zero'
msg = ('Domain length {0:.16f} is smaller than computed steady '
'reaction length {1:.16f}')
        # Second format argument assumed: the steady reaction length
        # computed in _compute_parameters().
        msg = msg.format(np.abs(grid[0]), self._reaction_length)
import numpy as np
class CEM:
def __init__(self, args, num_params, mu_init=None):
# Params inform.
self.num_params = num_params
        self.mu = np.zeros(self.num_params) if (mu_init is None) else np.array(mu_init)
"""Mapping out different CBMs with different numbers and different paradigms.
"""
import patches
import lettertask
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import plotnine as gg
import lazytools_sflippl as lazytools
import torch.nn as nn
# DATA =========================================================================
cbms = [
lettertask.data.CompositionalBinaryModel(
width=width, change_probability=change_probability,
samples=10000, seed=2002
) for width, change_probability in zip(
[[5, 5], [5, 5], [50, 50], [50, 50],
[5, 10], [10, 50], [5, 5, 5], [10, 10, 10]],
[[0.05, 0.5], [0.05, 0.2], [0.05, 0.5], [0.05, 0.2],
[0.2, 0.05], [0.2, 0.1], [0.05, 0.1, 0.5], [0.05, 0.1, 0.5]]
)
]
# OPTIMIZATION REGIMES =========================================================
def regime(method, lr):
def optimizer(params):
return method(params, lr=lr)
return optimizer
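# Example: regime(optim.SGD, 1e-2) returns a factory; given any nn.Module
# `model` (hypothetical), regime(optim.SGD, 1e-2)(model.parameters())
# builds the configured optimizer.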
optimization_regimes = []
for method in [optim.SGD, optim.Adam, optim.Adadelta]:
for lr in [1e-2, 1e-1, 1]:
optimization_regimes.append(regime(method, lr))
loss_dfs = []
angle_dfs = []
n_epochs = 5
# NEURAL NETWORKS ==============================================================
class BaRec(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps, bias=bias)
self.decoder = nn.Conv1d(latent_features, input_features, 1, bias=bias)
def forward(self, x):
code = self.encoder(x['current_values'])
prediction = self.predictor(code)
decoded = self.decoder(prediction).transpose(1, 2)
return decoded
class LaPred1P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features,
bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(code).\
reshape(self.timesteps, self.latent_features)
return prediction
class LaPred2P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features,
bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(x['latent_values']).\
reshape(self.timesteps, self.latent_features)
return {
'latent_values': code,
'latent_prediction': prediction
}
# MAIN PART ====================================================================
with tqdm(total = n_epochs*len(optimization_regimes)*len(cbms)*4) as pbar:
for idx_cbm, cbm in enumerate(cbms):
ideal = np.identity(len(cbm.width)).repeat(cbm.width, 1)
for idx_opt, opt in enumerate(optimization_regimes):
cts = patches.data.Contrastive1DTimeSeries(cbm.to_array(), seed=202)
# BaRec ============================================================
## Prepare =========================================================
barec = BaRec(1, data=cts)
optimizer = opt(barec.parameters())
criterion = nn.MSELoss()
loss_traj = []
angles = []
running_loss = 0
## Fit =============================================================
for epoch in range(n_epochs):
for i, data in enumerate(cts):
if i<len(cts):
if i % 10 == 0:
est = next(barec.parameters()).detach().numpy()
                            angles.append(np.matmul(ideal, est.T))
import os.path as osp
import numpy as np
import math
import torch
import json
import copy
import transforms3d
import scipy.sparse
import cv2
from pycocotools.coco import COCO
from core.config import cfg
from graph_utils import build_coarse_graphs
from noise_utils import synthesize_pose
from smpl import SMPL
from coord_utils import world2cam, cam2pixel, process_bbox, rigid_align, get_bbox
from aug_utils import affine_transform, j2d_processing, augm_params, j3d_processing, flip_2d_joint
from Human36M.noise_stats import error_distribution
from funcs_utils import save_obj, stop
from vis import vis_3d_pose, vis_2d_pose
class Human36M(torch.utils.data.Dataset):
def __init__(self, mode, args):
dataset_name = 'Human36M'
self.debug = args.debug
self.data_split = mode
self.img_dir = osp.join(cfg.data_dir, dataset_name, 'images')
self.annot_path = osp.join(cfg.data_dir, dataset_name, 'annotations')
self.subject_genders = {1: 'female', 5: 'female', 6: 'male', 7: 'female', 8: 'male', 9: 'male', 11: 'male'}
self.protocol = 2
self.action_name = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing', 'Purchases',
'Sitting', 'SittingDown', 'Smoking', 'Photo', 'Waiting', 'Walking', 'WalkDog',
'WalkTogether']
self.fitting_thr = 25 # milimeter
# SMPL joint set
self.mesh_model = SMPL()
self.smpl_root_joint_idx = self.mesh_model.root_joint_idx
self.smpl_face_kps_vertex = self.mesh_model.face_kps_vertex
self.smpl_vertex_num = 6890
self.smpl_joint_num = 24
self.smpl_flip_pairs = ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
self.smpl_skeleton = (
(0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
(17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 15))
self.joint_regressor_smpl = self.mesh_model.layer['neutral'].th_J_regressor
# H36M joint set
self.human36_joint_num = 17
self.human36_joints_name = (
'Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head',
'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist')
self.human36_flip_pairs = ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))
self.human36_skeleton = (
(0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2),
(2, 3), (0, 4), (4, 5), (5, 6))
self.human36_root_joint_idx = self.human36_joints_name.index('Pelvis')
self.human36_error_distribution = self.get_stat()
self.human36_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16)
self.joint_regressor_human36 = self.mesh_model.joint_regressor_h36m
# COCO joint set
self.coco_joint_num = 19 # 17 + 2, manually added pelvis and neck
self.coco_joints_name = (
'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck')
self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16))
self.coco_skeleton = (
(1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13),
(13, 15), #(5, 6), #(11, 12),
(17, 11), (17, 12), (17, 18), (18, 5), (18, 6), (18, 0))
self.joint_regressor_coco = self.mesh_model.joint_regressor_coco
self.input_joint_name = cfg.DATASET.input_joint_set # 'coco'
self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(self.input_joint_name)
self.datalist, skip_idx, skip_img_path = self.load_data()
if self.data_split == 'test':
det_2d_data_path = osp.join(cfg.data_dir, dataset_name, 'absnet_output_on_testset.json')
self.datalist_pose2d_det = self.load_pose2d_det(det_2d_data_path, skip_img_path)
print("Check lengths of annotation and detection output: ", len(self.datalist), len(self.datalist_pose2d_det))
self.graph_Adj, self.graph_L, self.graph_perm, self.graph_perm_reverse = \
build_coarse_graphs(self.mesh_model.face, self.joint_num, self.skeleton, self.flip_pairs, levels=9)
def load_pose2d_det(self, data_path, skip_list):
pose_list = []
with open(data_path) as f:
data = json.load(f)
for img_path, pose2d in data.items():
pose2d = np.array(pose2d, dtype=np.float32)
if img_path in skip_list:
continue
pose_list.append({'img_name': img_path, 'pose2d': pose2d})
pose_list = sorted(pose_list, key=lambda x: x['img_name'])
return pose_list
def get_joint_setting(self, joint_category='human36'):
joint_num = eval(f'self.{joint_category}_joint_num')
skeleton = eval(f'self.{joint_category}_skeleton')
flip_pairs = eval(f'self.{joint_category}_flip_pairs')
return joint_num, skeleton, flip_pairs
def get_subsampling_ratio(self):
if self.data_split == 'train':
return 5 # 50
elif self.data_split == 'test':
return 50 #
else:
            assert 0, 'Unknown subset'
def get_subject(self):
if self.data_split == 'train':
if self.protocol == 1:
subject = [1, 5, 6, 7, 8, 9]
elif self.protocol == 2:
subject = [1, 5, 6, 7, 8]
elif self.data_split == 'test':
if self.protocol == 1:
subject = [11]
elif self.protocol == 2:
subject = [9, 11]
else:
assert 0, print("Unknown subset")
if self.debug:
subject = subject[0:1]
return subject
def get_stat(self):
ordered_stats = []
for joint in self.human36_joints_name:
item = list(filter(lambda stat: stat['Joint'] == joint, error_distribution))[0]
ordered_stats.append(item)
return ordered_stats
def generate_syn_error(self):
noise = np.zeros((self.human36_joint_num, 2), dtype=np.float32)
weight = np.zeros(self.human36_joint_num, dtype=np.float32)
for i, ed in enumerate(self.human36_error_distribution):
noise[i, 0] = np.random.normal(loc=ed['mean'][0], scale=ed['std'][0])
noise[i, 1] = np.random.normal(loc=ed['mean'][1], scale=ed['std'][1])
weight[i] = ed['weight']
prob = np.random.uniform(low=0.0, high=1.0, size=self.human36_joint_num)
weight = (weight > prob)
noise = noise * weight[:, None]
return noise
def load_data(self):
print('Load annotations of Human36M Protocol ' + str(self.protocol))
subject_list = self.get_subject()
sampling_ratio = self.get_subsampling_ratio()
# aggregate annotations from each subject
db = COCO()
cameras = {}
joints = {}
smpl_params = {}
for subject in subject_list:
# data load
with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_data.json'), 'r') as f:
annot = json.load(f)
if len(db.dataset) == 0:
for k, v in annot.items():
db.dataset[k] = v
else:
for k, v in annot.items():
db.dataset[k] += v
# camera load
with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_camera.json'), 'r') as f:
cameras[str(subject)] = json.load(f)
# joint coordinate load
with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_joint_3d.json'), 'r') as f:
joints[str(subject)] = json.load(f)
# smpl parameter load
with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_smpl_param.json'), 'r') as f:
smpl_params[str(subject)] = json.load(f)
db.createIndex()
skip_idx = []
datalist = []
skip_img_idx = []
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
img_name = img_path.split('/')[-1]
# check subject and frame_idx
            frame_idx = img['frame_idx']
if frame_idx % sampling_ratio != 0:
continue
# check smpl parameter exist
            subject = img['subject']
            action_idx = img['action_idx']
            subaction_idx = img['subaction_idx']
            frame_idx = img['frame_idx']
try:
smpl_param = smpl_params[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)]
except KeyError:
skip_idx.append(image_id)
skip_img_idx.append(img_path.split('/')[-1])
continue
smpl_param['gender'] = 'neutral' # self.subject_genders[subject] # set corresponding gender
# camera parameter
cam_idx = img['cam_idx']
cam_param = cameras[str(subject)][str(cam_idx)]
            R = np.array(cam_param['R'], dtype=np.float32)
            t = np.array(cam_param['t'], dtype=np.float32)
            f = np.array(cam_param['f'], dtype=np.float32)
            c = np.array(cam_param['c'], dtype=np.float32)
cam_param = {'R': R, 't': t, 'focal': f, 'princpt': c}
# project world coordinate to cam, image coordinate space
joint_world = np.array(joints[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)],
dtype=np.float32)
joint_cam = world2cam(joint_world, R, t)
joint_img = cam2pixel(joint_cam, f, c)
joint_vis = np.ones((self.human36_joint_num, 1))
bbox = process_bbox(np.array(ann['bbox']))
if bbox is None: continue
datalist.append({
'img_path': img_path,
'img_name': img_name,
'img_id': image_id,
'bbox': bbox,
'img_hw': (img['height'], img['width']),
'joint_img': joint_img, # [x_img, y_img, z_cam]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'smpl_param': smpl_param,
'cam_param': cam_param})
datalist = sorted(datalist, key=lambda x: x['img_name'])
return datalist, skip_idx, skip_img_idx
def get_smpl_coord(self, smpl_param, cam_param):
pose, shape, trans, gender = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'], smpl_param['gender']
# smpl parameters (pose: 72 dimension, shape: 10 dimension)
smpl_pose = torch.FloatTensor(pose).view(-1, 3)
smpl_shape = torch.FloatTensor(shape).view(1, -1)
# translation vector from smpl coordinate to h36m world coordinate
trans = np.array(trans, dtype=np.float32).reshape(3)
# camera rotation and translation
R, t = np.array(cam_param['R'],dtype=np.float32).reshape(3, 3), np.array(cam_param['t'],dtype=np.float32).reshape(3)
# change to mean shape if beta is too far from it
smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0.
# transform world coordinate to camera coordinate
root_pose = smpl_pose[self.smpl_root_joint_idx, :].numpy()
angle = np.linalg.norm(root_pose)
root_pose = transforms3d.axangles.axangle2mat(root_pose / angle, angle)
root_pose = np.dot(R, root_pose)
axis, angle = transforms3d.axangles.mat2axangle(root_pose)
root_pose = axis * angle
smpl_pose[self.smpl_root_joint_idx] = torch.from_numpy(root_pose)
smpl_pose = smpl_pose.view(1, -1)
# get mesh and joint coordinates
smpl_mesh_coord, smpl_joint_coord = self.mesh_model.layer[gender](smpl_pose, smpl_shape)
# incorporate face keypoints
        smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1, 3)
        smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1, 3)
# smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex, :].reshape(-1, 3)
# smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord))
        # compensate rotation (translation from origin to root joint was not canceled)
        # translation vector from smpl coordinate to h36m world coordinate
        smpl_trans = np.array(trans, dtype=np.float32).reshape(3)
smpl_trans = np.dot(R, smpl_trans[:, None]).reshape(1, 3) + t.reshape(1, 3) / 1000
root_joint_coord = smpl_joint_coord[self.smpl_root_joint_idx].reshape(1, 3)
smpl_trans = smpl_trans - root_joint_coord + np.dot(R, root_joint_coord.transpose(1, 0)).transpose(1, 0)
        # translation
        smpl_mesh_coord += smpl_trans
        smpl_joint_coord += smpl_trans
        # meter -> millimeter
        smpl_mesh_coord *= 1000
        smpl_joint_coord *= 1000
return smpl_mesh_coord, smpl_joint_coord
def get_fitting_error(self, h36m_joint, smpl_mesh):
h36m_joint = h36m_joint - h36m_joint[self.human36_root_joint_idx,None,:] # root-relative
h36m_from_smpl = np.dot(self.joint_regressor_human36, smpl_mesh)
# translation alignment
h36m_from_smpl = h36m_from_smpl - np.mean(h36m_from_smpl,0)[None,:] + np.mean(h36m_joint,0)[None,:]
error = np.sqrt(np.sum((h36m_joint - h36m_from_smpl)**2,1)).mean()
return error
def get_coco_from_mesh(self, mesh_coord_cam, cam_param):
# regress coco joints
joint_coord_cam = np.dot(self.joint_regressor_coco, mesh_coord_cam)
joint_coord_cam = self.add_pelvis_and_neck(joint_coord_cam)
# projection
f, c = cam_param['focal'], cam_param['princpt']
joint_coord_img = cam2pixel(joint_coord_cam, f, c)
joint_coord_img[:, 2] = 1
return joint_coord_cam, joint_coord_img
def add_pelvis_and_neck(self, joint_coord):
lhip_idx = self.coco_joints_name.index('L_Hip')
rhip_idx = self.coco_joints_name.index('R_Hip')
pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5
pelvis = pelvis.reshape((1, -1))
lshoulder_idx = self.coco_joints_name.index('L_Shoulder')
rshoulder_idx = self.coco_joints_name.index('R_Shoulder')
neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5
neck = neck.reshape((1,-1))
joint_coord = np.concatenate((joint_coord, pelvis, neck))
return joint_coord
def __len__(self):
return len(self.datalist)
def __getitem__(self, idx):
data = copy.deepcopy(self.datalist[idx])
img_id, bbox, smpl_param, cam_param, img_shape = data['img_id'], data['bbox'].copy(), data['smpl_param'].copy(), data['cam_param'].copy(), data['img_hw']
flip, rot = augm_params(is_train=(self.data_split == 'train'))
# smpl coordinates
mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param, cam_param)
# regress coco joints
joint_cam_coco, joint_img_coco = self.get_coco_from_mesh(mesh_cam, cam_param)
# h36m joints from datasets
joint_cam_h36m, joint_img_h36m = data['joint_cam'], data['joint_img'][:, :2]
# root relative camera coordinate
mesh_cam = mesh_cam - joint_cam_h36m[:1]
# joint_cam_smpl = joint_cam_smpl - joint_cam_h36m[:1]
joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]
# joint_cam is PoseNet target
if self.input_joint_name == 'coco':
joint_img, joint_cam = joint_img_coco, joint_cam_coco
elif self.input_joint_name == 'human36':
joint_img, joint_cam = joint_img_h36m, joint_cam_h36m
# make new bbox
tight_bbox = get_bbox(joint_img)
bbox = process_bbox(tight_bbox.copy())
# aug
joint_img, trans = j2d_processing(joint_img.copy(), (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]), bbox, rot, 0, None)
if not cfg.DATASET.use_gt_input:
joint_img = self.replace_joint_img(idx, img_id, joint_img, tight_bbox, trans)
if flip:
joint_img = flip_2d_joint(joint_img, cfg.MODEL.input_shape[1], self.flip_pairs)
joint_cam = j3d_processing(joint_cam, rot, flip, self.flip_pairs)
# vis
# img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# new_img = cv2.warpAffine(img, trans, (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]), flags=cv2.INTER_LINEAR)
# vis_2d_pose(joint_img, new_img, self.human36_skeleton, prefix='detection')
# vis_3d_pose(joint_cam, self.human36_skeleton, joint_set_name='human36', gt=True)
# -> 0~1
joint_img = joint_img[:, :2]
joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])
# normalize loc&scale
mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)  # api: numpy.mean
# (c) <NAME> & <NAME>
# routines for fitting histograms
import numpy as np
import scipy.special as sps
MAX_NEWTON_ITERATIONS = 1000
def gauss_fit(data, binwidth=None):
"""
Fits a Gaussian pdf to a set of independent values (data) using
maximum likelihood estimators. If fitting to a histogram, the
resulting fit can be normalized if the binwidth is also supplied.
"""
if binwidth is None:
norm = 1
else:
norm = float(np.size(data) * binwidth)
mean = np.mean(data)
std = np.std(data)
optParam = norm, mean, std
return optParam
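# Usage sketch for gauss_fit (illustrative only, synthetic data):
#   samples = np.random.normal(loc=2.0, scale=0.5, size=10000)
#   norm, mu, sigma = gauss_fit(samples, binwidth=0.05)
#   x = np.linspace(samples.min(), samples.max(), 200)
#   pdf = norm * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))
# pdf can then be drawn over a histogram of samples with bins of width 0.05.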
def gamma_fit(data, binwidth=None):
"""
Fits a Gamma pdf to independent values (data) using
maximum likelihood estimators.
"""
if binwidth is None:
norm = 1
else:
norm = float(np.size(data) * binwidth)
# for large k where the Gaussian distribution is approached,
# k = np.power(np.mean(data), 2.) / np.power(np.std(data), 2.)
# In general, k can be approximated to within 1.5% as
s = np.log(np.mean(data)) - np.mean(np.log(data))
kguess = (3. - s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
# k = kguess # "accurate to within 1.5%" according to wikipedia.org
# We can solve for k numerically using Newton's method.
# Scipy.special has the digamma (psi) and its derivative (polygamma)
# required for this.
k = k_param(kguess, s)
theta = np.mean(data)/k
optParam = norm, k, theta
return optParam
def k_param(kguess, s):
"""
Finds the root of the maximum likelihood estimator
for k using Newton's method. Routines for using Newton's method
exist within the scipy package but they were not explored. This
function is sufficiently well behaved such that we should not
have problems solving for k, especially since we have a good
estimate of k to use as a starting point.
"""
k = kguess
val = np.log(k) - sps.psi(k) - s
counter = 0
while np.abs(val) >= 0.0001:
k = k - (np.log(k)-sps.psi(k)-s)/(1/k-sps.polygamma(1, k))
val = np.log(k) - sps.psi(k) - s  # api: numpy.log
counter += 1
if counter > MAX_NEWTON_ITERATIONS:
    break  # guard against non-convergence
return k
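# Usage sketch for gamma_fit (illustrative only): np.random.gamma uses the
# same (k, theta) = (shape, scale) convention as the estimator above, so
#   samples = np.random.gamma(shape=2.5, scale=1.2, size=10000)
#   norm, k, theta = gamma_fit(samples, binwidth=0.1)
# should recover k close to 2.5 and theta close to 1.2 for large samples.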
#
# Utility functions for loading and creating and solving circuits defined by
# netlists
#
import numpy as np
import codecs
import pandas as pd
import liionpack as lp
import os
import pybamm
import scipy as sp
from lcapy import Circuit
def read_netlist(
filepath,
Ri=None,
Rc=None,
Rb=None,
Rt=None,
I=None,
V=None,
):
"""
Assumes netlist has been saved by LTSpice with format Descriptor Node1 Node2 Value
Any lines starting with * are comments and . are commands so ignore them
Nodes begin with N so remove that
Open ended components are not allowed and their nodes start with NC (no-connection)
Args:
filepath (str): Path to netlist circuit file '.cir' or '.txt'.
Ri (float): Internal resistance ($\Omega$).
Rc (float): Connection resistance ($\Omega$).
Rb (float): Busbar resistance ($\Omega$).
Rt (float): Terminal connection resistance ($\Omega$).
I (float): Current (A).
V (float): Initial battery voltage (V).
Returns:
pandas.DataFrame:
A netlist of circuit elements with format desc, node1, node2, value.
"""
# Read in the netlist
if "." not in filepath:
filepath += ".cir"
if not os.path.isfile(filepath):
temp = os.path.join(lp.CIRCUIT_DIR, filepath)
if os.path.isfile(temp):
filepath = temp
if ".cir" in filepath:
with codecs.open(filepath, "r", "utf-16LE") as fd:
Lines = fd.readlines()
elif ".txt" in filepath:
with open(filepath, "r") as f:
Lines = f.readlines()
else:
raise FileNotFoundError(
'Please supply a valid file with extension ".cir" or ".txt"'
)
# Ignore lines starting with * or .
Lines = [l.strip("\n").split(" ") for l in Lines if l[0] not in ["*", "."]]
Lines = np.array(Lines, dtype="<U16")
# Read descriptions and nodes, strip N from nodes
# Lines is desc | node1 | node2
desc = Lines[:, 0]
node1 = Lines[:, 1]
node2 = Lines[:, 2]
value = Lines[:, 3]
try:
value = value.astype(float)
except ValueError:
pass
node1 = np.array([x.strip("N") for x in node1], dtype=int)
node2 = np.array([x.strip("N") for x in node2], dtype=int)
netlist = pd.DataFrame(
{"desc": desc, "node1": node1, "node2": node2, "value": value}
)
# Populate the values based on the descriptions (element types)
for name, val in [
("Ri", Ri),
("Rc", Rc),
("Rb", Rb),
("Rl", Rb),
("Rt", Rt),
("I", I),
("V", V),
]:
if val is not None:
# netlist["desc"] consists of entries like 'Ri13'
# this map finds all the entries that start with (e.g.) 'Ri'
name_map = netlist["desc"].str.find(name) > -1
# then allocates the value to the corresponding indices
netlist.loc[name_map, ("value")] = val
lp.logger.notice("netlist " + filepath + " loaded")
return netlist
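# Example (a sketch; the file name and values are hypothetical): a minimal
# netlist in the expected "desc node1 node2 value" format could look like
#   V0 N001 N000 4.2
#   Ri0 N001 N002 0.01
#   Rb0 N002 N000 0.0001
# and would be loaded with
#   netlist = read_netlist("my_pack.txt", Ri=1e-2, Rb=1e-4)
# where the keyword values overwrite the matching element values from the file.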
def setup_circuit(
Np=1,
Ns=1,
Ri=1e-2,
Rc=1e-2,
Rb=1e-4,
Rt=1e-5,
I=80.0,
V=4.2,
plot=False,
terminals="left",
):
"""
Define a netlist from a number of batteries in parallel and series
Args:
Np (int): Number of batteries in parallel.
Ns (int): Number of batteries in series.
Ri (float): Internal resistance ($\Omega$).
Rc (float): Connection resistance ($\Omega$).
Rb (float): Busbar resistance ($\Omega$).
Rt (float): Terminal connection resistance ($\Omega$).
I (float): Current (A).
V (float): Initial battery voltage (V).
plot (bool): Plot the circuit.
terminals (string): The location of the terminals. Can be "left", "right",
"left-right", "right-left" or a list or array of node integers.
Returns:
pandas.DataFrame:
A netlist of circuit elements with format desc, node1, node2, value.
"""
Nc = Np
Nr = Ns * 3 + 1
grid = np.arange(Nc * Nr).reshape([Nr, Nc])
coords = np.indices(grid.shape)
y = coords[0, :, :]
x = coords[1, :, :]
# renumber nodes contiguously now; doing it after the netlist is built is very slow
mask = np.ones([Nr, Nc], dtype=bool)
# This is no longer needed as terminals connect directly to battery
# Guess could also add a terminal connection resistor though
# mask[1:-1, 0] = False
grid[mask] = np.arange(np.sum(mask)) + 1
x = x[mask].flatten()
y = y[mask].flatten()
grid[~mask] = -2 # These should never be used
# grid is a Nr x Nc matrix
# 1st column is terminals only
# 1st and last rows are busbars
# Other rows alternate between series resistor and voltage source
# For example if Np=2 and Ns=2,
# grid = array([[ 0, 1], # busbar
# # Rs
# [ 2, 3],
# # V
# [ 4, 5],
# # Ri
# [ 6, 7],
# # Rs
# [ 8, 9],
# # V
# [10, 11],
# # Ri
# [12, 13]] # busbar)
# Connections are across busbars in first and last rows, and down each column
# See "01 Getting Started.ipynb"
# Build data with ['element type', node1, node2, value]
netlist = []
num_Rb = 0
num_V = 0
desc = []
node1 = []
node2 = []
value = []
# -ve busbars (bottom row of the grid)
bus_nodes = [grid[0, :]]
for nodes in bus_nodes:
for i in range(len(nodes) - 1):
# netline = []
desc.append("Rbn" + str(num_Rb))
num_Rb += 1
node1.append(nodes[i])
node2.append(nodes[i + 1])
value.append(Rb)
num_Rs = 0
num_Ri = 0
# Series resistors and voltage sources
cols = np.arange(Nc)
rows = np.arange(Nr)[:-1]
rtype = ["Rc", "V", "Ri"] * Ns
for col in cols:
# Go down the column alternating Rs, V, Ri connections between nodes
nodes = grid[:, col]
for row in rows:
if rtype[row] == "Rc":
# Inter(c)onnection / weld
desc.append(rtype[row] + str(num_Rs))
num_Rs += 1
val = Rc
elif rtype[row] == "Ri":
# Internal resistor
desc.append(rtype[row] + str(num_Ri))
num_Ri += 1
val = Ri
else:
# Voltage source
desc.append("V" + str(num_V))
num_V += 1
val = V
node1.append(nodes[row + 1])
node2.append(nodes[row])
value.append(val)
# netlist.append(netline)
# +ve busbar (top row of the grid)
bus_nodes = [grid[-1, :]]
for nodes in bus_nodes:
for i in range(len(nodes) - 1):
# netline = []
desc.append("Rbp" + str(num_Rb))
num_Rb += 1
node1.append(nodes[i])
node2.append(nodes[i + 1])
value.append(Rb)
desc = np.asarray(desc)
node1 = np.asarray(node1)
node2 = np.asarray(node2)
value = np.asarray(value)
main_grid = {
"desc": desc,
"node1": node1,
"node2": node2,
"value": value,
"node1_x": x[node1 - 1],
"node1_y": y[node1 - 1],
"node2_x": x[node2 - 1],
"node2_y": y[node2 - 1],
}
# Current source - spans the entire pack
if (terminals == "left") or (terminals is None):
t_nodes = [0, 0]
elif terminals == "right":
t_nodes = [-1, -1]
elif terminals == "left-right":
t_nodes = [0, -1]
elif terminals == "right-left":
t_nodes = [-1, 0]
elif isinstance(terminals, (list, np.ndarray)):
t_nodes = terminals
else:
raise ValueError(
'Please specify a valid terminals argument: "left", '
+ '"right", "left-right" or "right-left" or a list or '
+ "array of nodes"
)
# terminal nodes
t1 = grid[-1, t_nodes[0]]
t2 = grid[0, t_nodes[1]]
# terminal coords
x1 = x[t1 - 1]
x2 = x[t2 - 1]
y1 = y[t1 - 1]
y2 = y[t2 - 1]
nn = grid.max() + 1 # next node
# coords of nodes forming current source loop
if terminals == "left" or (
isinstance(terminals, (list, np.ndarray)) and np.all(np.array(terminals) == 0)
):
ix = x1 - 1
dy = 0
elif terminals == "right" or (
isinstance(terminals, (list, np.ndarray)) and np.all(np.array(terminals) == -1)
):
ix = x1 + 1
dy = 0
else:
ix = -1
dy = 1
if dy == 0:
desc = ["Rtp1", "I0", "Rtn1"]
xs = np.array([x1, ix, ix, x2])
ys = np.array([y1, y1, y2, y2])
node1 = [t1, nn, 0]
node2 = [nn, 0, t2]
value = [Rt, I, Rt]
num_elem = 3
else:
desc = ["Rtp0", "Rtp1", "I0", "Rtn1", "Rtn0"]
xs = np.array([x1, x1, ix, ix, x2, x2])
ys = np.array([y1, y1 + dy, y1 + dy, 0 - dy, 0 - dy, y2])
node1 = [t1, nn, nn + 1, 0, nn + 2]
node2 = [nn, nn + 1, 0, nn + 2, t2]
hRt = Rt / 2
value = [hRt, hRt, I, hRt, hRt]
num_elem = 5
desc = np.asarray(desc)
node1 = np.asarray(node1)
node2 = np.asarray(node2)
value = np.asarray(value)
current_loop = {
"desc": desc,
"node1": node1,
"node2": node2,
"value": value,
"node1_x": xs[:num_elem],
"node1_y": ys[:num_elem],
"node2_x": xs[1:],
"node2_y": ys[1:],
}
for key in main_grid.keys():
main_grid[key] = np.concatenate((main_grid[key], current_loop[key]))
netlist = pd.DataFrame(main_grid)
if plot:
lp.simple_netlist_plot(netlist)
lp.logger.notice("Circuit created")
return netlist
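# Usage sketch (values are illustrative): a 2p3s pack is built with
#   netlist = setup_circuit(Np=2, Ns=3, Ri=1e-2, Rc=1e-2, Rb=1e-4, I=5.0, V=4.0)
# The returned DataFrame contains one "V*" row per cell (6 here), "Rc*"/"Ri*"
# rows for the interconnect and internal resistances, "Rbn*"/"Rbp*" busbar
# rows, and an "I0" current source closing the loop across the terminals.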
def solve_circuit(netlist):
"""
Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit.
The MNA equations are a linear system Ax = z.
See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
Args:
netlist (pandas.DataFrame):
A netlist of circuit elements with format desc, node1, node2, value.
Returns:
(np.ndarray, np.ndarray):
- V_node: Voltages of the voltage elements
- I_batt: Currents of the current elements
"""
timer = pybamm.Timer()
desc = np.array(netlist["desc"]).astype("<U16")
node1 = np.array(netlist["node1"])
node2 = np.array(netlist["node2"])
value = np.array(netlist["value"])
nLines = netlist.shape[0]
n = np.concatenate((node1, node2)).max() # Number of nodes (highest node number)
m = 0 # "m" is the number of voltage sources, determined below.
V_elem = ["V", "O", "E", "H"]
for nm in desc:
if nm[0] in V_elem:
m += 1
# Construct the A matrix, which will be a (n+m) x (n+m) matrix
# A = [G B]
# [B.T D]
# G matrix tracks the conductance between nodes (consists of floats)
# B matrix tracks voltage sources between nodes (consists of -1, 0, 1)
# D matrix is always zero for non-dependent sources
# Construct the z vector with length (n+m)
# z = [i]
# [e]
# i is currents and e is voltages
# Use lil matrices to construct the A array
G = sp.sparse.lil_matrix((n, n))
B = sp.sparse.lil_matrix((n, m))
D = sp.sparse.lil_matrix((m, m))
i = np.zeros([n, 1])
e = np.zeros([m, 1])
"""
% We need to keep track of the number of voltage sources we've parsed
% so far as we go through file. We start with zero.
"""
vsCnt = 0
"""
% This loop does the bulk of filling in the arrays. It scans line by line
% and fills in the arrays depending on the type of element found on the
% current line.
% See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
"""
for k1 in range(nLines):
n1 = node1[k1] - 1 # get the two node numbers in python index format
n2 = node2[k1] - 1
elem = desc[k1][0]
if elem == "R":
# Resistance elements: fill the G matrix only
g = 1 / value[k1] # conductance = 1 / R
"""
% Here we fill in G array by adding conductance.
% The procedure is slightly different if one of the nodes is
% ground, so check for those accordingly.
"""
if n1 == -1: # -1 is the ground node
G[n2, n2] = G[n2, n2] + g
elif n2 == -1:
G[n1, n1] = G[n1, n1] + g
else:
G[n1, n1] = G[n1, n1] + g
G[n2, n2] = G[n2, n2] + g
G[n1, n2] = G[n1, n2] - g
G[n2, n1] = G[n2, n1] - g
elif elem == "V":
# Voltage elements: fill the B matrix and the e vector
if n1 >= 0:
B[n1, vsCnt] = B[n1, vsCnt] + 1
if n2 >= 0:
B[n2, vsCnt] = B[n2, vsCnt] - 1
e[vsCnt] = value[k1]
vsCnt += 1
elif elem == "I":
# Current elements: fill the i vector only
if n1 >= 0:
i[n1] = i[n1] - value[k1]
if n2 >= 0:
i[n2] = i[n2] + value[k1]
# Construct final matrices from sub-matrices
upper = sp.sparse.hstack((G, B))
lower = sp.sparse.hstack((B.T, D))
A = sp.sparse.vstack((upper, lower))
# Convert a to csr sparse format for more efficient solving of the linear system
# csr works slightly more robustly than csc
A_csr = sp.sparse.csr_matrix(A)
z = np.vstack((i, e))
toc_setup = timer.time()
lp.logger.debug(f"Circuit set up in {toc_setup}")
# Scipy
# X = solve(A, z).flatten()
X = sp.sparse.linalg.spsolve(A_csr, z).flatten()
# Pypardiso
# X = pypardiso.spsolve(Aspr, z).flatten()
# amg
# ml = pyamg.smoothed_aggregation_solver(Aspr)
# X = ml.solve(b=z, tol=1e-6, maxiter=10, accel="bicgstab")
# include ground node (0V)
# it is counter-intuitive that z is [i,e] while X is [V,I], but this is correct
V_node = np.zeros(n + 1)
V_node[1:] = X[:n]
I_batt = X[n:]
toc = timer.time()
lp.logger.debug(f"Circuit solved in {toc - toc_setup}")
lp.logger.info(f"Circuit set up and solved in {toc}")
return V_node, I_batt
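# Worked example (a sketch, not part of the library): a single 4.2 V source
# across a 2 ohm resistor gives n = 1 node and m = 1 source, so
#   A = [[0.5, 1.0],      z = [0.0,
#        [1.0, 0.0]]           4.2]
# and spsolve yields V1 = 4.2 V with a source current of -2.1 A (negative by
# the MNA sign convention used above). In code:
#   netlist = pd.DataFrame({"desc": ["V0", "R0"], "node1": [1, 1],
#                           "node2": [0, 0], "value": [4.2, 2.0]})
#   V_node, I_batt = solve_circuit(netlist)  # V_node -> [0.0, 4.2], I_batt -> [-2.1]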
def solve_circuit_vectorized(netlist):
"""
Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit.
The MNA equations are a linear system Ax = z.
See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
Args:
netlist (pandas.DataFrame):
A netlist of circuit elements with format desc, node1, node2, value.
Returns:
(np.ndarray, np.ndarray):
- V_node: Voltages of the voltage elements
- I_batt: Currents of the current elements
"""
timer = pybamm.Timer()
desc = np.array(netlist["desc"]).astype("<U1") # just take first character
desc2 = np.array(netlist["desc"]).astype("<U2") # take first 2 characters
node1 = np.array(netlist["node1"])
node2 = np.array(netlist["node2"])
value = np.array(netlist["value"])
n = np.concatenate((node1, node2)).max() # Number of nodes (highest node number)
m = np.sum(desc == "V") # we only use V in liionpack
# Construct the A matrix, which will be a (n+m) x (n+m) matrix
# A = [G B]
# [B.T D]
# G matrix tracks the conductance between nodes (consists of floats)
# B matrix tracks voltage sources between nodes (consists of -1, 0, 1)
# D matrix is always zero for non-dependent sources
# Construct the z vector with length (n+m)
# z = [i]
# [e]
# i is currents and e is voltages
# Use lil matrices to construct the A array
G = sp.sparse.lil_matrix((n, n))
B = sp.sparse.lil_matrix((n, m))
D = sp.sparse.lil_matrix((m, m))
i = np.zeros([n, 1])
e = np.zeros([m, 1])
"""
% This loop does the bulk of filling in the arrays. It scans line by line
% and fills in the arrays depending on the type of element found on the
% current line.
% See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html
"""
node1 = node1 - 1 # get the two node numbers in python index format
node2 = node2 - 1
# Resistance elements: fill the G matrix only
g = np.ones(len(value)) * np.nan
n1_ground = node1 == -1
n2_ground = node2 == -1
r_list = [d for d in np.unique(desc2) if d[0] == "R"]
for r_string in r_list:
R_map = desc2 == r_string
g[R_map] = 1 / value[R_map] # conductance = 1 / R
R_map_n1_ground = np.logical_and(R_map, n1_ground)
R_map_n2_ground = np.logical_and(R_map, n2_ground)  # api: numpy.logical_and
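# The vectorized stamping is truncated here. A hedged sketch of one way to
# finish it (an assumption, not necessarily liionpack's actual code): collect
# (row, col, value) triplets and let COO construction sum duplicates, which
# reproduces the += stamping of the scalar loop in solve_circuit:
#   r_rows = ~np.isnan(g)                     # rows that are resistors
#   both = r_rows & ~n1_ground & ~n2_ground   # neither node grounded
#   rows = np.concatenate([node1[both], node2[both], node1[both], node2[both]])
#   cols = np.concatenate([node1[both], node2[both], node2[both], node1[both]])
#   vals = np.concatenate([g[both], g[both], -g[both], -g[both]])
#   G = sp.sparse.coo_matrix((vals, (rows, cols)), shape=(n, n)).tocsr()
# with grounded resistor rows adding their conductance onto the diagonal of
# the surviving node only.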
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the XSHG cal, and schedule a function
# using the CN_EQUITIES (XSHG) cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute'),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-03', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
dates = pd.date_range(cls.START_DATE, cls.END_DATE)
assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = pd.Timestamp('2020-09-04', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1000
ASSET_FINDER_EQUITY_SIDS = (1, 133)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(
{
'open': [90, 95, 100, 105],
'high': [90, 95, 100, 105],
'low': [90, 95, 100, 105],
'close': [90, 95, 100, 105],
'volume': 100,
},
index=cls.equity_daily_bar_days,
)
return ((sid, frame) for sid in sids)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1000: {
'symbol': 'CLF06',
'root_symbol': 'CL',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
'exchange': 'CMES',
'multiplier': 100,
},
},
orient='index',
)
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': 2.0,
'high': 2.0,
'low': 2.0,
'close': 2.0,
'volume': 100,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
def test_portfolio_exited_position(self):
# This test ensures that 'phantom' positions do not appear in
# context.portfolio.positions in the case that a position has been
# entered and fully exited.
def initialize(context, sids):
context.ordered = False
context.exited = False
context.sids = sids
def handle_data(context, data):
if not context.ordered:
for s in context.sids:
context.order(context.sid(s), 1)
context.ordered = True
if not context.exited:
amounts = [pos.amount for pos
in itervalues(context.portfolio.positions)]
if (
len(amounts) > 0 and
all([(amount == 1) for amount in amounts])
):
for stock in context.portfolio.positions:
context.order(context.sid(stock), -1)
context.exited = True
# Should be 0 when all positions are exited.
context.record(num_positions=len(context.portfolio.positions))
result = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
sids=self.ASSET_FINDER_EQUITY_SIDS,
)
expected_position_count = [
0, # Before entering the first position
2, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(result.iloc[i,:]['num_positions'], expected)
def test_noop_orders(self):
asset = self.asset_finder.retrieve_asset(1)
# Algorithm that tries to buy with extremely low stops/limits and tries
# to sell with extremely high versions of same. Should not end up with
# any positions for reasonable data.
def handle_data(algo, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
algo.order(asset, 100, limit_price=1)
# But with high stop, shouldn't trigger
algo.order(asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
algo.order(asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
algo.order(asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
algo.order(asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
algo.order(asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
algo.order(asset, 100, limit_price=.00000001)
algo.order(asset, -100, stop_price=.00000001)
daily_stats = self.run_algorithm(handle_data=handle_data)
# Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
def test_position_weights(self):
sids = (1, 133, 1000)
equity_1, equity_133, future_1000 = \
self.asset_finder.retrieve_all(sids)
def initialize(algo, sids_and_amounts, *args, **kwargs):
algo.ordered = False
algo.sids_and_amounts = sids_and_amounts
algo.set_commission(
us_equities=PerTrade(0), us_futures=PerTrade(0),
)
algo.set_slippage(
us_equities=FixedSlippage(0),
us_futures=FixedSlippage(0),
)
def handle_data(algo, data):
if not algo.ordered:
for s, amount in algo.sids_and_amounts:
algo.order(algo.sid(s), amount)
algo.ordered = True
algo.record(
position_weights=algo.portfolio.current_portfolio_weights,
)
daily_stats = self.run_algorithm(
sids_and_amounts=zip(sids, [2, -1, 1]),
initialize=initialize,
handle_data=handle_data,
)
expected_position_weights = [
# No positions held on the first day.
pd.Series({}),
# Each equity's position value is its price times the number of
# shares held. In this example, we hold a long position in 2 shares
# of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
# total portfolio value. The total portfolio value is the sum of
# cash ($905.00) plus the value of all equity positions.
#
# For a futures contract, its weight is the unit price times number
# of shares held times the multiplier. For future_1000, this is
# (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
pd.Series({
equity_1: 190.0 / (190.0 - 95.0 + 905.0),
equity_133: -95.0 / (190.0 - 95.0 + 905.0),
future_1000: 200.0 / (190.0 - 95.0 + 905.0),
}),
pd.Series({
equity_1: 200.0 / (200.0 - 100.0 + 905.0),
equity_133: -100.0 / (200.0 - 100.0 + 905.0),
future_1000: 200.0 / (200.0 - 100.0 + 905.0),
}),
pd.Series({
equity_1: 210.0 / (210.0 - 105.0 + 905.0),
equity_133: -105.0 / (210.0 - 105.0 + 905.0),
future_1000: 200.0 / (210.0 - 105.0 + 905.0),
}),
]
for i, expected in enumerate(expected_position_weights):
assert_equal(daily_stats.iloc[i]['position_weights'], expected)
class TestBeforeTradingStart(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 10000
SIM_PARAMS_DATA_FREQUENCY = 'minute'
EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1
DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
'2016-01-05',
tz='utc',
)
SPLIT_ASSET_SID = 3
ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_in_range(
cls.data_start,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(minutes_count) + 1
split_data = pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': 100 * minutes_arr,
},
index=asset_minutes,
)
split_data.iloc[480:] = split_data.iloc[480:] / 2.0
for sid in (1, 8554):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
yield 2, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
50,
)
yield cls.SPLIT_ASSET_SID, split_data
@classmethod
def make_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
}
])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
def test_data_in_bts_minute(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
60,
"1m"
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# fetching data at midnight gets us the previous market minute's data
self.assertEqual(240, results.iloc[0].the_price1)
self.assertEqual(242, results.iloc[0].the_high1)
# make sure that price is ffilled, but not other fields
self.assertEqual(350, results.iloc[0].the_price2)
self.assertTrue(np.isnan(results.iloc[0].the_high2))
# 60-minute history
# asset1 day1 price should be 331-390
np.testing.assert_array_equal(
range(331, 391), algo.history_values[0]["price"][1]
)
# asset1 day1 high should be 333-392
np.testing.assert_array_equal(
range(333, 393), algo.history_values[0]["high"][1]
)
# asset2 day1 price should be 19 300s, then 40 350s
np.testing.assert_array_equal(
[300] * 19, algo.history_values[0]["price"][2][0:19]
)
np.testing.assert_array_equal(
[350] * 40, algo.history_values[0]["price"][2][20:]
)
# asset2 day1 high should be all NaNs except for the 19th item
# = 2016-01-05 20:20:00+00:00
np.testing.assert_array_equal(
np.full(19, np.nan), algo.history_values[0]["high"][2][0:19]
)
self.assertEqual(352, algo.history_values[0]["high"][2][19])
np.testing.assert_array_equal(
np.full(40, np.nan), algo.history_values[0]["high"][2][20:]
)
def test_data_in_bts_daily(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
1,
"1d",
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
self.assertEqual(392, results.the_high1[0])
self.assertEqual(390, results.the_price1[0])
# nan because asset2 only trades every 50 minutes
self.assertTrue(np.isnan(results.the_high2[0]))
self.assertEqual(350, results.the_price2[0])
self.assertEqual(392, algo.history_values[0]["high"][1][0])
self.assertEqual(390, algo.history_values[0]["price"][1][0])
self.assertEqual(352, algo.history_values[0]["high"][2][0])
self.assertEqual(350, algo.history_values[0]["price"][2][0])
def test_portfolio_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data
assert (context.hd_portfolio == bts_portfolio)
record(pos_value=bts_portfolio.positions_value)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Asset starts with price 1 on 1/05 and increases by 1 every minute.
# Simulation starts on 1/06, where the price in bts is 390, and
# positions_value is 0. On 1/07, price is 780, and after buying one
# share on the first bar of 1/06, positions_value is 780
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
def test_account_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=context.account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Starting portfolio value is 10000. Order for the asset fills on the
# second bar of 1/06, where the price is 391, and costs the default
# commission of 0. On 1/07, the price is 780, and the increase in
# portfolio value is 780-392-0
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0,
places=2)
def test_portfolio_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data, except for the positions
for k in bts_portfolio.__dict__:
if k != 'positions':
assert (context.hd_portfolio.__dict__[k]
== bts_portfolio.__dict__[k])
record(pos_value=bts_portfolio.positions_value)
record(pos_amount=bts_portfolio.positions[sid(3)].amount)
record(
last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
)
def handle_data(context, data):
if not context.ordered:
order(sid(3), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, positions value should be 780, same as without split
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
# On 1/07, after applying the split, 1 share becomes 2
self.assertEqual(results.pos_amount.iloc[0], 0)
self.assertEqual(results.pos_amount.iloc[1], 2)
# On 1/07, after applying the split, last sale price is halved
self.assertEqual(results.last_sale_price.iloc[0], 0)
self.assertEqual(results.last_sale_price.iloc[1], 390)
def test_account_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=bts_account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, portfolio value is the same as without split
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-12-31', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5 # max history window length
STRING_TYPE_NAMES = [s.__name__ for s in string_types]
STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
ASSET_TYPE_NAME = Asset.__name__
CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
STRING_TYPE_NAMES)
ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
CONTINUOUS_FUTURE_NAME] +
STRING_TYPE_NAMES)
ARG_TYPE_TEST_CASES = (
('history__assets', (bad_type_history_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history__fields', (bad_type_history_fields,
STRING_TYPE_NAMES_STRING,
True)),
('history__bar_count', (bad_type_history_bar_count, 'int', False)),
('history__frequency', (bad_type_history_frequency,
STRING_TYPE_NAMES_STRING,
False)),
('current__assets', (bad_type_current_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current__fields', (bad_type_current_fields,
STRING_TYPE_NAMES_STRING,
True)),
('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
('history_kwarg__assets',
(bad_type_history_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg_bad_list__assets',
(bad_type_history_assets_kwarg_list,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg__fields',
(bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
('history_kwarg__bar_count',
(bad_type_history_bar_count_kwarg, 'int', False)),
('history_kwarg__frequency',
(bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
('current_kwarg__assets',
(bad_type_current_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current_kwarg__fields',
(bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
)
sids = 0, 1, 3, 133
# FIXME: Pass a benchmark explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
data = make_simple_equity_info(
cls.sids,
cls.START_DATE,
cls.END_DATE,
)
data.loc[3, 'symbol'] = 'TEST'
return data
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cal = cls.trading_calendars[Equity]
sessions = cal.sessions_in_range(cls.START_DATE, cls.END_DATE)
frame = pd.DataFrame({
'close': 10., 'high': 10.5, 'low': 9.5, 'open': 10., 'volume': 100,
}, index=sessions)
for sid in sids:
yield sid, frame
def test_noop(self):
self.run_algorithm(
initialize=initialize_noop,
handle_data=handle_data_noop,
)
def test_noop_string(self):
self.run_algorithm(script=noop_algo)
def test_no_handle_data(self):
self.run_algorithm(script=no_handle_data)
def test_api_calls(self):
self.run_algorithm(
initialize=initialize_api,
handle_data=handle_data_api,
)
def test_api_calls_string(self):
self.run_algorithm(script=api_algo)
def test_api_get_environment(self):
platform = 'zipline'
algo = self.make_algo(
script=api_get_environment_algo,
platform=platform,
)
algo.run()
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
self.run_algorithm(script=api_symbol_algo)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = self.make_algo(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data.current(sid(0), "price"))
context.incr += 1""",
)
results = test_algo.run()
# flatten the list of txns
all_txns = [val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
expected_spread = 0.05
expected_price = test_algo.recorded_vars["price"] - expected_spread
self.assertEqual(expected_price, txn['price'])
# make sure that the $100 commission was applied to our cash
# the txn was for -1000 shares at 9.95, means -9.95k. our capital_used
# for that day was therefore 9.95k, but after the $100 commission,
# it should be 9.85k.
self.assertEqual(9850, results.capital_used[1])
self.assertEqual(100, results["orders"].iloc[1][0]["commission"])
@parameterized.expand(
[
('no_minimum_commission', 0,),
('default_minimum_commission', 0,),
('alternate_minimum_commission', 2,),
]
)
def test_volshare_slippage(self, name, minimum_commission):
tempdir = TempDirectory()
try:
if name == "default_minimum_commission":
commission_line = "set_commission(commission.PerShare(0.02))"
else:
commission_line = \
"set_commission(commission.PerShare(0.02, " \
"min_trade_cost={0}))".format(minimum_commission)
# verify order -> transaction -> portfolio position.
# --------------
# XXX: This is the last remaining consumer of
# create_daily_trade_source.
trades = factory.create_daily_trade_source(
[0], self.sim_params, self.asset_finder, self.trading_calendar
)
data_portal = create_data_portal_from_trade_history(
self.asset_finder, self.trading_calendar, tempdir,
self.sim_params, {0: trades}
)
test_algo = self.make_algo(
data_portal=data_portal,
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
{0}
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data.current(sid(0), "price"))
record(volume=data.current(sid(0), "volume"))
record(incr=context.incr)
context.incr += 1
""".format(commission_line),
)
results = test_algo.run()
all_txns = [
val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 67)
# all_orders are all the incremental versions of the
# orders as each new fill comes in.
all_orders = list(toolz.concat(results['orders']))
if minimum_commission == 0:
# for each incremental version of each order, the commission
# should be its filled amount * 0.02
for order_ in all_orders:
self.assertAlmostEqual(
order_["filled"] * 0.02,
order_["commission"]
)
else:
# the commission should be at least the min_trade_cost
for order_ in all_orders:
if order_["filled"] > 0:
self.assertAlmostEqual(
max(order_["filled"] * 0.02, minimum_commission),
order_["commission"]
)
else:
self.assertEqual(0, order_["commission"])
finally:
tempdir.cleanup()
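    # Illustrative commission math for the two branches above (hypothetical
    # fill size; the actual per-minute fills depend on the volume limit):
    #   per_share = 0.02; filled = 50
    #   commission = max(filled * per_share, minimum_commission)
    #   -> 1.00 when minimum_commission == 0, 2.00 when it is 2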
def test_incorrectly_set_futures_slippage_model(self):
code = dedent(
"""
from zipline.api import set_slippage, slippage
class MySlippage(slippage.FutureSlippageModel):
def process_order(self, data, order):
return data.current(order.asset, 'price'), order.amount
def initialize(context):
set_slippage(MySlippage())
"""
)
test_algo = self.make_algo(script=code)
with self.assertRaises(IncompatibleSlippageModel):
# Passing a futures slippage model as the first argument, which is
# for setting equity models, should fail.
test_algo.run()
def test_algo_record_vars(self):
test_algo = self.make_algo(script=record_variables)
results = test_algo.run()
for i in range(1, 252):
self.assertEqual(results.iloc[i-1]["incr"], i)
def test_algo_record_nan(self):
test_algo = self.make_algo(script=record_float_magic % 'nan')
results = test_algo.run()
for i in range(1, 252):
self.assertTrue(np.isnan(results.iloc[i-1]["data"]))
def test_batch_market_order_matches_multiple_manual_orders(self):
share_counts = pd.Series([50, 100])
multi_blotter = RecordBatchBlotter()
multi_test_algo = self.make_algo(
script=dedent("""\
from collections import OrderedDict
from six import iteritems
from zipline.api import sid, order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
it = zip(context.assets, {share_counts})
for asset, shares in it:
order(asset, shares)
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=multi_blotter,
)
multi_stats = multi_test_algo.run()
self.assertFalse(multi_blotter.order_batch_called)
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 2, \
"len(orders) was %s but expected 2" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=batch_blotter,
)
batch_stats = batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
for stats in (multi_stats, batch_stats):
stats.orders = stats.orders.apply(
lambda orders: [toolz.dissoc(o, 'id') for o in orders]
)
stats.transactions = stats.transactions.apply(
lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
)
assert_equal(multi_stats, batch_stats)
def test_batch_market_order_filters_null_orders(self):
share_counts = [50, 0]
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 1, \
"len(orders) was %s but expected 1" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=share_counts),
blotter=batch_blotter,
)
batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
def test_order_dead_asset(self):
# after asset 0 is dead
params = SimulationParameters(
start_session=pd.Timestamp("2007-01-03", tz='UTC'),
end_session=pd.Timestamp("2007-01-05", tz='UTC'),
trading_calendar=self.trading_calendar,
)
# order method shouldn't blow up
self.run_algorithm(
script="""
from zipline.api import order, sid
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
""",
)
# order_value and order_percent should blow up
for order_str in ["order_value", "order_percent"]:
test_algo = self.make_algo(
script="""
from zipline.api import order_percent, order_value, sid
def initialize(context):
pass
def handle_data(context, data):
{0}(sid(0), 10)
""".format(order_str),
sim_params=params,
)
with self.assertRaises(CannotOrderDelistedAsset):
test_algo.run()
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
self.run_algorithm(script=access_portfolio_in_init)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
self.run_algorithm(script=access_account_in_init)
def test_without_kwargs(self):
"""
Test that api methods on the data object can be called with positional
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(sim_params=params, script=call_without_kwargs)
def test_good_kwargs(self):
"""
Test that api methods on the data object can be called with keyword
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(script=call_with_kwargs, sim_params=params)
@parameterized.expand([('history', call_with_bad_kwargs_history),
('current', call_with_bad_kwargs_current)])
def test_bad_kwargs(self, name, algo_text):
"""
Test that api methods on the data object called with bad kwargs return
a meaningful TypeError that we create, rather than an unhelpful cython
error
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
% name, cm.exception.args[0])
@parameterized.expand(ARG_TYPE_TEST_CASES)
def test_arg_types(self, name, inputs):
keyword = name.split('__')[1]
algo = self.make_algo(script=inputs[0])
with self.assertRaises(TypeError) as cm:
algo.run()
expected = "Expected %s argument to be of type %s%s" % (
keyword,
'or iterable of type ' if inputs[2] else '',
inputs[1]
)
self.assertEqual(expected, cm.exception.args[0])
def test_empty_asset_list_to_history(self):
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(
script=dedent("""
def initialize(context):
pass
def handle_data(context, data):
data.history([], "price", 5, '1d')
"""),
sim_params=params,
)
@parameterized.expand(
[('bad_kwargs', call_with_bad_kwargs_get_open_orders),
('good_kwargs', call_with_good_kwargs_get_open_orders),
('no_kwargs', call_with_no_kwargs_get_open_orders)]
)
def test_get_open_orders_kwargs(self, name, script):
algo = self.make_algo(script=script)
if name == 'bad_kwargs':
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual('Keyword argument `sid` is no longer '
'supported for get_open_orders. Use `asset` '
'instead.', cm.exception.args[0])
else:
algo.run()
def test_empty_positions(self):
"""
Test that when we try context.portfolio.positions[stock] on a stock
for which we have no positions, we return a Position with values 0
(but more importantly, we don't crash) and don't save this Position
to the user-facing dictionary PositionTracker._positions_store
"""
results = self.run_algorithm(script=empty_positions)
num_positions = results.num_positions
amounts = results.amounts
self.assertTrue(all(num_positions == 0))
self.assertTrue(all(amounts == 0))
def test_schedule_function_time_rule_positionally_misplaced(self):
"""
Test that when a user specifies a time rule for the date_rule argument,
but no rule in the time_rule argument
(e.g. schedule_function(func, <time_rule>)), we assume that means
assign a time rule but no date rule
"""
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-12', tz='UTC'),
end=pd.Timestamp('2006-01-13', tz='UTC'),
data_frequency='minute'
)
algocode = dedent("""
from zipline.api import time_rules, schedule_function
def do_at_open(context, data):
context.done_at_open.append(context.get_datetime())
def do_at_close(context, data):
context.done_at_close.append(context.get_datetime())
def initialize(context):
context.done_at_open = []
context.done_at_close = []
schedule_function(do_at_open, time_rules.market_open())
schedule_function(do_at_close, time_rules.market_close())
def handle_data(algo, data):
pass
""")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
algo = self.make_algo(script=algocode, sim_params=sim_params)
algo.run()
self.assertEqual(len(w), 2)
for i, warning in enumerate(w):
self.assertIsInstance(warning.message, UserWarning)
self.assertEqual(
warning.message.args[0],
'Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule'
)
                # The warnings come from lines 13 and 14 in the algocode
self.assertEqual(warning.lineno, 13 + i)
self.assertEqual(
algo.done_at_open,
[pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
)
self.assertEqual(
algo.done_at_close,
[pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
)
class TestCapitalChanges(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-09', tz='UTC')
# XXX: This suite only has daily data for sid 0 and only has minutely data
# for sid 1.
sids = ASSET_FINDER_EQUITY_SIDS = (0, 1)
DAILY_SID = 0
MINUTELY_SID = 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_minute_bar_data(cls):
minutes = cls.trading_calendar.minutes_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(100, 100 + len(minutes), 1)
opens = closes
highs = closes + 5
lows = closes - 5
frame = pd.DataFrame(
index=minutes,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.MINUTELY_SID, frame
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
days = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(10.0, 10.0 + len(days), 1.0)
opens = closes
highs = closes + 0.5
lows = closes - 0.5
frame = pd.DataFrame(
index=days,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.DAILY_SID, frame
@parameterized.expand([
('target', 151000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = self.make_algo(
script=algocode,
capital_changes=capital_changes,
sim_params=SimulationParameters(
start_session=self.START_DATE,
end_session=self.END_DATE,
trading_calendar=self.nyse_calendar,
)
)
# We call get_generator rather than `run()` here because we care about
# the raw capital change packets.
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 151000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = np.array([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, we transact, but the price of our stock
# does not change. Day 3, we start getting returns
expected_daily['returns'] = np.array([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0) / 100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +50000
(151000.0 + 2000.0) / 151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0) / 153000.0 - 1.0,
])
expected_daily['pnl'] = np.array([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = np.array([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
np.array([100000.0] * 5) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = np.array([
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat],
err_msg='daily ' + stat,
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat],
err_msg='cumulative ' + stat,
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
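    # Worked check of the 1/06 figures above (editorial, derived from the same
    # comments): the +50000 capital change raises starting equity to 151000
    # before that day's return is computed, so with 2000 shares each gaining $1:
    #   day_return = (151000.0 + 2000.0) / 151000.0 - 1.0   # ~= 0.01325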
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {
pd.Timestamp(datestr, tz='UTC'): {
'type': change_type,
'value': value
}
for datestr, value in values
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
expected_daily = {}
expected_capital_changes = np.array([0.0, 1000.0, 0.0])
if change_loc == 'intraday':
# Fills at 491, +500 capital change comes at 638 (17:00) and
# 698 (18:00), ends day at 879
day2_return = (
(1388.0 + 149.0 + 147.0) / 1388.0 *
(2184.0 + 60.0 + 60.0) / 2184.0 *
(2804.0 + 181.0 + 181.0) / 2804.0 - 1.0
)
else:
# Fills at 491, ends day at 879, capital change +1000
day2_return = (2388.0 + 390.0 + 388.0) / 2388.0 - 1
expected_daily['returns'] = np.array([
# Fills at 101, ends day at 489
(1000.0 + 489 - 101) / 1000.0 - 1.0,
day2_return,
# Fills at 881, ends day at 1269
(3166.0 + 390.0 + 390.0 + 388.0) / 3166.0 - 1.0,
])
expected_daily['pnl'] = np.array([
388.0,
390.0 + 388.0,
390.0 + 390.0 + 388.0,
])
expected_daily['capital_used'] = np.array([
-101.0, -491.0, -881.0
])
expected_daily['ending_cash'] = \
np.array([1000.0] * 3) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
if change_loc == 'intraday':
# Capital changes come after day start
expected_daily['starting_cash'] -= expected_capital_changes
expected_daily['starting_value'] = np.array([
0.0, 489.0, 879.0 * 2
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 3),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 3),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
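    # Editorial note on the intraday branch above: each capital change starts
    # a new subperiod, so the day-2 return chains three subperiod returns
    # instead of using a single quotient:
    #   r_day2 = (1 + r1) * (1 + r2) * (1 + r3) - 1
    # with subperiod starting equities 1388 -> 2184 -> 2804 in the 'target'
    # parameterization.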
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_minute_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
emission_rate='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
minute_perf = [r['minute_perf'] for r in results if 'minute_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
# Minute perfs are cumulative for the day
expected_minute = {}
capital_changes_after_start = np.array([0.0] * 1170)
if change_loc == 'intraday':
capital_changes_after_start[539:599] = 500.0
capital_changes_after_start[599:780] = 1000.0
expected_minute['pnl'] = np.array([0.0] * 1170)
expected_minute['pnl'][:2] = 0.0
expected_minute['pnl'][2:392] = 1.0
expected_minute['pnl'][392:782] = 2.0
expected_minute['pnl'][782:] = 3.0
for start, end in ((0, 390), (390, 780), (780, 1170)):
expected_minute['pnl'][start:end] = \
np.cumsum(expected_minute['pnl'][start:end])
expected_minute['capital_used'] = np.concatenate((
[0.0] * 1, [-101.0] * 389,
[0.0] * 1, [-491.0] * 389,
[0.0] * 1, [-881.0] * 389,
))
# +1000 capital changes comes before the day start if interday
day2adj = 0.0 if change_loc == 'intraday' else 1000.0
expected_minute['starting_cash'] = np.concatenate((
[1000.0] * 390,
# 101 spent on 1/03
[1000.0 - 101.0 + day2adj] * 390,
# 101 spent on 1/03, 491 on 1/04, +1000 capital change on 1/04
[1000.0 - 101.0 - 491.0 + 1000] * 390
))
expected_minute['ending_cash'] = \
expected_minute['starting_cash'] + \
expected_minute['capital_used'] + \
capital_changes_after_start
expected_minute['starting_value'] = np.concatenate((
[0.0] * 390,
[489.0] * 390,
[879.0 * 2] * 390
))
expected_minute['ending_value'] = \
expected_minute['starting_value'] + \
expected_minute['pnl'] - \
expected_minute['capital_used']
expected_minute['portfolio_value'] = \
expected_minute['ending_value'] + \
expected_minute['ending_cash']
expected_minute['returns'] = \
expected_minute['pnl'] / \
(expected_minute['starting_value'] +
expected_minute['starting_cash'])
# If the change is interday, we can just calculate the returns from
# the pnl, starting_value and starting_cash. If the change is intraday,
# the returns after the change have to be calculated from two
# subperiods
if change_loc == 'intraday':
# The last packet (at 1/04 16:59) before the first capital change
prev_subperiod_return = expected_minute['returns'][538]
# From 1/04 17:00 to 17:59
cur_subperiod_pnl = \
expected_minute['pnl'][539:599] - expected_minute['pnl'][538]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][538]] * 60)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][538] + 500] * 60)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][539:599] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last packet (at 1/04 17:59) before the second capital change
prev_subperiod_return = expected_minute['returns'][598]
# From 1/04 18:00 to 21:00
cur_subperiod_pnl = \
expected_minute['pnl'][599:780] - expected_minute['pnl'][598]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][598]] * 181)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][598] + 500] * 181)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][599:780] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last minute packet of each day
expected_daily = {
k: np.array([v[389], v[779], v[1169]])
for k, v in iteritems(expected_minute)
}
stats = [
'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value', 'returns'
]
expected_cumulative = deepcopy(expected_minute)
# "Add" daily return from 1/03 to minute returns on 1/04 and 1/05
# "Add" daily return from 1/04 to minute returns on 1/05
expected_cumulative['returns'][390:] = \
(expected_cumulative['returns'][390:] + 1) * \
(expected_daily['returns'][0] + 1) - 1
expected_cumulative['returns'][780:] = \
(expected_cumulative['returns'][780:] + 1) * \
(expected_daily['returns'][1] + 1) - 1
# Add daily pnl/capital_used from 1/03 to 1/04 and 1/05
# Add daily pnl/capital_used from 1/04 to 1/05
expected_cumulative['pnl'][390:] += expected_daily['pnl'][0]
expected_cumulative['pnl'][780:] += expected_daily['pnl'][1]
expected_cumulative['capital_used'][390:] += \
expected_daily['capital_used'][0]
expected_cumulative['capital_used'][780:] += \
expected_daily['capital_used'][1]
# starting_cash, starting_value are same as those of the first daily
# packet
expected_cumulative['starting_cash'] = \
np.repeat(expected_daily['starting_cash'][0:1], 1170)
expected_cumulative['starting_value'] = \
np.repeat(expected_daily['starting_value'][0:1], 1170)
# extra cumulative packet per day from the daily packet
for stat in stats:
for i in (390, 781, 1172):
expected_cumulative[stat] = np.insert(
expected_cumulative[stat],
i,
expected_cumulative[stat][i-1]
)
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in minute_perf]),
expected_minute[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
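    # Editorial note on the slice indices used above: each session has 390
    # minutes and the NYSE open in this period is 14:31 UTC, so the 17:00 UTC
    # capital change on day 2 lands at minute
    #   390 + (17 * 60) - (14 * 60 + 31)   # = 390 + 149 = 539
    # which is why capital_changes_after_start[539:599] covers 17:00-17:59.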
class TestGetDatetime(zf.WithMakeAlgo, zf.ZiplineTestCase):
SIM_PARAMS_DATA_FREQUENCY = 'minute'
START_DATE = to_utc('2014-01-02 9:31')
END_DATE = to_utc('2014-01-03 9:31')
ASSET_FINDER_EQUITY_SIDS = 0, 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@parameterized.expand(
[
('default', None,),
('utc', 'UTC',),
('us_east', 'US/Eastern',),
]
)
def test_get_datetime(self, name, tz):
algo = dedent(
"""
import pandas as pd
from zipline.api import get_datetime
def initialize(context):
context.tz = {tz} or 'UTC'
context.first_bar = True
def handle_data(context, data):
dt = get_datetime({tz})
if dt.tz.zone != context.tz:
raise ValueError("Mismatched Zone")
if context.first_bar:
if dt.tz_convert("US/Eastern").hour != 9:
raise ValueError("Mismatched Hour")
elif dt.tz_convert("US/Eastern").minute != 31:
raise ValueError("Mismatched Minute")
context.first_bar = False
""".format(tz=repr(tz))
)
algo = self.make_algo(script=algo)
algo.run()
self.assertFalse(algo.first_bar)
class TestTradingControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sid = 133
sids = ASSET_FINDER_EQUITY_SIDS = 133, 134
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = True
@classmethod
def init_class_fixtures(cls):
super(TestTradingControls, cls).init_class_fixtures()
cls.asset = cls.asset_finder.retrieve_asset(cls.sid)
cls.another_asset = cls.asset_finder.retrieve_asset(134)
def _check_algo(self,
algo,
expected_order_count,
expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
self.assertEqual(algo.order_count, expected_order_count)
def check_algo_succeeds(self, algo, order_count=4):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, order_count, None)
def check_algo_fails(self, algo, order_count):
self._check_algo(algo,
order_count,
TradingControlViolation)
def test_set_max_position_size(self):
def initialize(self, asset, max_shares, max_notional):
self.set_slippage(FixedSlippage())
self.order_count = 0
self.set_max_position_size(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
# Buy one share four times. Should be fine.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Buy three shares four times. Should bail on the fourth before it's
# placed.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 3)
# Buy three shares four times. Should bail due to max_notional on the
# third attempt.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 2)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
asset=self.another_asset,
max_shares=10,
max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!. Should
# fail because setting sid to None makes the control apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
max_shares=10,
max_notional=61.0,
asset=None,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
def test_set_asset_restrictions(self):
def initialize(algo, sid, restrictions, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions, on_error)
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
# Set HistoricalRestrictions for one sid for the entire simulation,
# and fail.
rlm = HistoricalRestrictions([
Restriction(
self.sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN)
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# Set StaticRestrictions for one sid and fail.
rlm = StaticRestrictions([self.sid])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# just log an error on the violation if we choose not to fail.
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='log',
initialize=initialize,
handle_data=handle_data,
)
with make_test_handler(self) as log_catcher:
self.check_algo_succeeds(algo)
logs = [r.message for r in log_catcher.records]
self.assertIn("Order for 100 shares of Equity(133 [A]) at "
"2006-01-04 21:00:00+00:00 violates trading constraint "
"RestrictedListOrder({})", logs)
self.assertFalse(algo.could_trade)
# set the restricted list to exclude the sid, and succeed
rlm = HistoricalRestrictions([
Restriction(
sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN) for sid in [134, 135, 136]
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
self.assertTrue(algo.could_trade)
@parameterized.expand([
('order_first_restricted_sid', 0),
('order_second_restricted_sid', 1)
])
def test_set_multiple_asset_restrictions(self, name, to_order_idx):
def initialize(algo, restrictions1, restrictions2, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions1, on_error)
algo.set_asset_restrictions(restrictions2, on_error)
def handle_data(algo, data):
algo.could_trade1 = data.can_trade(algo.sid(self.sids[0]))
algo.could_trade2 = data.can_trade(algo.sid(self.sids[1]))
algo.order(algo.sid(self.sids[to_order_idx]), 100)
algo.order_count += 1
rl1 = StaticRestrictions([self.sids[0]])
rl2 = StaticRestrictions([self.sids[1]])
algo = self.make_algo(
restrictions1=rl1,
restrictions2=rl2,
initialize=initialize,
handle_data=handle_data,
on_error='fail',
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade1)
self.assertFalse(algo.could_trade2)
def test_set_do_not_order_list(self):
def initialize(self, restricted_list):
self.order_count = 0
self.set_do_not_order_list(restricted_list, on_error='fail')
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
rlm = [self.sid]
algo = self.make_algo(
restricted_list=rlm,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
def test_set_max_order_size(self):
def initialize(algo, asset, max_shares, max_notional):
algo.order_count = 0
algo.set_max_order_size(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
# Buy one share.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=10,
max_notional=500.0,
)
self.check_algo_succeeds(algo)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed shares.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=3,
max_notional=500.0,
)
self.check_algo_fails(algo, 3)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed notional.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=10,
max_notional=40.0,
)
self.check_algo_fails(algo, 3)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.another_asset,
max_shares=1,
max_notional=1.0,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!.
# Should fail because not specifying a sid makes the trading control
# apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=None,
max_shares=1,
max_notional=1.0,
)
self.check_algo_fails(algo, 0)
def test_set_max_order_count(self):
def initialize(algo, count):
algo.order_count = 0
algo.set_max_order_count(count)
def handle_data(algo, data):
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo = self.make_algo(
count=3,
initialize=initialize,
handle_data=handle_data,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 3)
def test_set_max_order_count_minutely(self):
sim_params = self.make_simparams(data_frequency='minute')
def initialize(algo, max_orders_per_day):
algo.minute_count = 0
algo.order_count = 0
algo.set_max_order_count(max_orders_per_day)
# Order 5 times twice in a single day, and set a max order count of
# 9. The last order of the second batch should fail.
def handle_data(algo, data):
if algo.minute_count == 0 or algo.minute_count == 100:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_orders_per_day=9,
sim_params=sim_params,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 9)
# Set a limit of 5 orders per day, and order 5 times in the first
# minute of each day. This should succeed because the counter gets
# reset each day.
def handle_data(algo, data):
if (algo.minute_count % 390) == 0:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_orders_per_day=5,
sim_params=sim_params,
)
algo.run()
# 5 orders per day times 4 days.
self.assertEqual(algo.order_count, 20)
def test_long_only(self):
def initialize(algo):
algo.order_count = 0
algo.set_long_only()
# Sell immediately -> fail immediately.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 0)
# Buy on even days, sell on odd days. Never takes a short position, so
# should succeed.
def handle_data(algo, data):
if (algo.order_count % 2) == 0:
algo.order(algo.sid(self.sid), 1)
else:
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings. Should succeed.
def handle_data(algo, data):
amounts = [1, 1, 1, -3]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings plus an extra share.
# Should fail on the last sale.
def handle_data(algo, data):
amounts = [1, 1, 1, -4]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 3)
def test_register_post_init(self):
def initialize(algo):
algo.initialized = True
def handle_data(algo, data):
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_position_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_count(1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_long_only()
self.run_algorithm(initialize=initialize, handle_data=handle_data)
class TestAssetDateBounds(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-01-03', tz='UTC')
SIM_PARAMS_START_DATE = END_DATE # Only run for one day.
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
T = partial(pd.Timestamp, tz='UTC')
return pd.DataFrame.from_records([
{'sid': 1,
'symbol': 'OLD',
'start_date': T('1990'),
'end_date': T('1991'),
'exchange': 'TEST'},
{'sid': 2,
'symbol': 'NEW',
'start_date': T('2017'),
'end_date': T('2018'),
'exchange': 'TEST'},
{'sid': 3,
'symbol': 'GOOD',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST'},
])
def test_asset_date_bounds(self):
def initialize(algo):
algo.ran = False
algo.register_trading_control(AssetDateBounds(on_error='fail'))
def handle_data(algo, data):
# This should work because sid 3 is valid during the algo lifetime.
algo.order(algo.sid(3), 1)
# Sid already expired.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(1), 1)
# Sid doesn't exist yet.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(2), 1)
algo.ran = True
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
algo.run()
self.assertTrue(algo.ran)
class TestAccountControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sidint, = ASSET_FINDER_EQUITY_SIDS = (133,)
BENCHMARK_SID = None
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(data={
'close': [10., 10., 11., 11.],
'open': [10., 10., 11., 11.],
'low': [9.5, 9.5, 10.45, 10.45],
'high': [10.5, 10.5, 11.55, 11.55],
'volume': [100, 100, 100, 300],
}, index=cls.equity_daily_bar_days)
yield cls.sidint, frame
def _check_algo(self, algo, expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
def check_algo_succeeds(self, algo):
        # Unlike TestTradingControls, this class's helpers do no order_count check.
self._check_algo(algo, None)
def check_algo_fails(self, algo):
self._check_algo(algo, AccountControlViolation)
def test_set_max_leverage(self):
def initialize(algo, max_leverage):
algo.set_max_leverage(max_leverage=max_leverage)
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo.record(latest_time=algo.get_datetime())
# Set max leverage to 0 so buying one share fails.
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_leverage=0,
)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Set max leverage to 1 so buying one share passes
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_leverage=1,
)
self.check_algo_succeeds(algo)
def test_set_min_leverage(self):
def initialize(algo, min_leverage, grace_period):
algo.set_min_leverage(
min_leverage=min_leverage, grace_period=grace_period
)
def handle_data(algo, data):
algo.order_target_percent(algo.sid(self.sidint), .5)
algo.record(latest_time=algo.get_datetime())
# Helper for not having to pass init/handle_data at each callsite.
def make_algo(min_leverage, grace_period):
return self.make_algo(
initialize=initialize,
handle_data=handle_data,
min_leverage=min_leverage,
grace_period=grace_period,
)
# Set min leverage to 1.
# The algorithm will succeed because it doesn't run for more
# than 10 days.
offset = pd.Timedelta('10 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_succeeds(algo)
# The algorithm will fail because it doesn't reach a min leverage of 1
# after 1 day.
offset = pd.Timedelta('1 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Increase the offset to 2 days, and the algorithm fails a day later
offset = pd.Timedelta('2 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Set the min_leverage to .0001 and the algorithm succeeds.
algo = make_algo(min_leverage=.0001, grace_period=offset)
self.check_algo_succeeds(algo)
class TestFuturesAlgo(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
TRADING_CALENDAR_STRS = ('us_futures',)
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
BENCHMARK_SID = None
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1: {
'symbol': 'CLG16',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2015-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2016-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
'auto_close_date': pd.Timestamp('2016-01-18', tz='UTC'),
'exchange': 'TEST',
},
},
orient='index',
)
def test_futures_history(self):
algo_code = dedent(
"""
from datetime import time
from zipline.api import (
date_rules,
get_datetime,
schedule_function,
sid,
time_rules,
)
def initialize(context):
context.history_values = []
schedule_function(
make_history_call,
date_rules.every_day(),
time_rules.market_open(),
)
schedule_function(
check_market_close_time,
date_rules.every_day(),
time_rules.market_close(),
)
def make_history_call(context, data):
# Ensure that the market open is 6:31am US/Eastern.
open_time = get_datetime().tz_convert('US/Eastern').time()
assert open_time == time(6, 31)
context.history_values.append(
data.history(sid(1), 'close', 5, '1m'),
)
def check_market_close_time(context, data):
# Ensure that this function is called at 4:59pm US/Eastern.
# By default, `market_close()` uses an offset of 1 minute.
close_time = get_datetime().tz_convert('US/Eastern').time()
assert close_time == time(16, 59)
"""
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
algo.run()
# Assert that we were able to retrieve history data for minutes outside
# of the 6:31am US/Eastern to 5:00pm US/Eastern futures open times.
np.testing.assert_array_equal(
algo.history_values[0].index,
pd.date_range(
'2016-01-06 6:27',
'2016-01-06 6:31',
freq='min',
tz='US/Eastern',
),
)
np.testing.assert_array_equal(
algo.history_values[1].index,
pd.date_range(
'2016-01-07 6:27',
'2016-01-07 6:31',
freq='min',
tz='US/Eastern',
),
)
# Expected prices here are given by the range values created by the
# default `make_future_minute_bar_data` method.
np.testing.assert_array_equal(
algo.history_values[0].values, list(map(float, range(2196, 2201))),
)
np.testing.assert_array_equal(
algo.history_values[1].values, list(map(float, range(3636, 3641))),
)
@staticmethod
def algo_with_slippage(slippage_model):
return dedent(
"""
from zipline.api import (
commission,
order,
set_commission,
set_slippage,
sid,
slippage,
get_datetime,
)
def initialize(context):
commission_model = commission.PerFutureTrade(0)
set_commission(us_futures=commission_model)
slippage_model = slippage.{model}
set_slippage(us_futures=slippage_model)
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), 10)
context.ordered = True
context.order_price = data.current(sid(1), 'price')
"""
).format(model=slippage_model)
def test_fixed_future_slippage(self):
algo_code = self.algo_with_slippage('FixedSlippage(spread=0.10)')
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# Flatten the list of transactions.
all_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
# Add 1 to the expected price because the order does not fill until the
# bar after the price is recorded.
expected_spread = 0.05
expected_price = (algo.order_price + 1) + expected_spread
self.assertEqual(txn['price'], expected_price)
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
def test_volume_contract_slippage(self):
algo_code = self.algo_with_slippage(
'VolumeShareSlippage(volume_limit=0.05, price_impact=0.1)',
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# There should be no commissions.
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
# Flatten the list of transactions.
all_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
# With a volume limit of 0.05, and a total volume of 100 contracts
# traded per minute, we should require 2 transactions to order 10
# contracts.
self.assertEqual(len(all_txns), 2)
for i, txn in enumerate(all_txns):
# Add 1 to the order price because the order does not fill until
# the bar after the price is recorded.
order_price = algo.order_price + i + 1
expected_impact = order_price * 0.1 * (0.05 ** 2)
expected_price = order_price + expected_impact
self.assertEqual(txn['price'], expected_price)
class TestAnalyzeAPIMethod(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-05', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_analyze_called(self):
self.perf_ref = None
def initialize(context):
pass
def handle_data(context, data):
pass
def analyze(context, perf):
self.perf_ref = perf
algo = self.make_algo(
initialize=initialize, handle_data=handle_data, analyze=analyze,
)
results = algo.run()
self.assertIs(results, self.perf_ref)
class TestOrderCancelation(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
ASSET_FINDER_EQUITY_SIDS = (1,)
ASSET_FINDER_EQUITY_SYMBOLS = ('ASSET1',)
BENCHMARK_SID = None
code = dedent(
"""
from zipline.api import (
sid, order, set_slippage, slippage, VolumeShareSlippage,
set_cancel_policy, cancel_policy, EODCancel
)
def initialize(context):
set_slippage(
slippage.VolumeShareSlippage(
volume_limit=1,
price_impact=0
)
)
{0}
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), {1})
context.ordered = True
""",
)
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(1, 1 + minutes_count)
# normal test data, but volume is pinned at 1 share per minute
yield 1, pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': np.full(minutes_count, 1.0),
},
index=asset_minutes,
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
yield 1, pd.DataFrame(
{
                'open': np.full(3, 1, dtype=np.float64),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
    def __init__(self, idf, list=None, typ='') :
        self.recordings = []
        # only record an initial recording if one was actually passed;
        # otherwise a None entry would break __len__ and __repr__
        if list is not None:
            self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
    M,K - sequence of sleep states and sequence of
    0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
        if re.match(r'\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
        if re.search(r'^\s+$', l) :
            continue
        if re.search(r'\s*#', l) :
            continue
        if re.match(r'\d+\s+-?\d+', l) :
            a = re.split(r'\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
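# Example (hypothetical paths; the state codes are assumed here to follow
# the usual convention 1=REM, 2=Wake, 3=NREM):
#   M, K = load_stateidx('/data/sleep', 'M1_rec1')
#   np.unique(M)   # e.g. array([1, 2, 3])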
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
    (list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
        if re.search(r'^\s+$', l) :
            continue
        if re.search(r'^\s*#', l) :
            continue
        a = re.split(r'\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
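# Example recording list (illustrative 'recordings.txt'; comment and blank
# lines are skipped):
#   C   M1_control_rec
#   E   M2_experiment_rec
# ctr, exp = load_recordings('/data/sleep', 'recordings.txt')
#   -> ctr == ['M1_control_rec'], exp == ['M2_experiment_rec']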
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
    Each line is either a control or an experiment; control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
        if re.search(r'^\s+$', l):
            continue
        if re.search(r'^\s*#', l):
            continue
        a = re.split(r'\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
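# Example (illustrative): for a file containing
#   C   M1_ctr_rec
#   E   M2_exp_rec   0.25
#   E   M3_exp_rec   0.5
# the call returns (['M1_ctr_rec'],
#                   {'0.25': ['M2_exp_rec'], '0.5': ['M3_exp_rec']});
# note that doses are kept as the raw strings from the file.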
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
        a = re.search("^" + 'SR' + ":" + r"\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
        a = re.search("^" + field + ":" + r"\s+(.*)", l)
if a :
values.append(a.group(1))
return values
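# Example (illustrative info.txt lines; note the regexp requires whitespace
# after the colon):
#   SR:         1525.88
#   conversion: 0.195
# get_infoparam('/data/sleep/M1_rec1/info.txt', 'SR')  # -> ['1525.88']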
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
if you was stimulated for 2min every 20 min with 20 Hz, return the
start and end index of the each 2min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
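# Worked example (synthetic laser vector, SR=1 Hz for readability):
#   laser = np.array([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1])
#   laser_start_end(laser, SR=1, intval=5)
#   # -> (array([1, 9]), array([2, 11])): two trains, indices inclusive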
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
inter-stimulation intervals, avg. inter-stimulation interval, stimulation frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
The conversion is only executed if no conversion factor
is specified in the info file yet.
:param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
:return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
The nice thing about this filter is that it has zero-phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
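# Example cutoff conversion for the w0 note above: a 20 Hz lowpass at a
# 1000 Hz sampling rate uses w0 = 2 * 20 / 1000 = 0.04, e.g.
#   y = my_lpfilter(x, 0.04)   # x: any 1D signal sampled at 1 kHz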
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
downsample the vector x by replacing nbin consecutive
bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
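# Worked example: downsample_vec(np.arange(6.0), 3) averages [0,1,2] and
# [3,4,5] and returns array([1., 4.]); trailing samples that do not fill a
# complete bin are dropped.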
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
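# Example: smoothing a unit impulse returns (approximately) the normalized
# Gaussian kernel, so the output still sums to ~1:
#   x = np.zeros(201); x[100] = 1.0
#   y = smooth_data(x, 5.0)   # y.sum() is close to 1.0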
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using a Hann window to estimate
the power spectrum
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of Hann window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
# note: newer scipy versions only accept 'hann' (not 'hanning') as window name
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hann', nperseg=int(length), noverlap=int(length/2))
return pxx, f
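# Sanity check sketch for the variance note above (assumes a plain white
# noise input):
#   x = np.random.randn(100000)
#   pxx, f = power_spectrum(x, 1024, dt=0.001)
#   # np.var(x) is approximately np.sum(pxx) * (f[1] - f[0])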
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
then the first window goes from 0s to 5s, the second window from 2.5 to 7.5s, ...
The last window ends at ceil(len(data)/length)*5s
Another example: assume we have 13 s of data and 5 s windows; then the power density
is calculated for the following time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at (2*3-2) * (5/2) = 10 s
Note: the returned time axis goes from 0 to 10 s in 2.5 s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
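# Worked example of the window bookkeeping above: with dt = 1/1000 s,
# length = 5000 samples (5 s) and n = 13000 samples (13 s), k = ceil(13/5) = 3,
# so Pow has 2*k-1 = 5 columns at t = 0, 2.5, 5, 7.5 and 10 s.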
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM - EEG, EMG spectrogram
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
sdt time bin for brain state, typically 2.5s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = len(np.where(pow_mu[sstart:i]>thr_mu)[0]) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
algorithm running laser control for REM sleep dependent activation/inhibition.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
the laser stops, but we keep track of the missing time. The next time the laser turns on again,
it stays on for the duration of the most recent REM period plus the remaining time.
The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
sdt time bin for brain state, typically 2.5s
delay delay to wait after a REM sleep period ends until the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = len(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by &recursive_sleepstate_rem || &recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
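# Example: the line "THR_TH_DELTA: 3.10 1.95" in a parameter file is parsed
# into params['THR_TH_DELTA'] == [3.1, 1.95]; numeric strings are converted
# to floats, everything else stays a string.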
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
predict NREM periods only based on EEG/EMG history; the same algorithm is also used for
closed-loop NREM sleep manipulation.
The algorithm uses for NREM sleep detection thresholds for delta power, EMG power, and theta/delta power.
For delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
delta has to cross the hard threshold to initiate a NREM period. Then, as long as
delta is above the soft threshold (and EMG power stays low), NREM sleep continues.
The values for hard and soft threshold are fitted using a Gaussian mixture model
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
:param alpha: temporal smoothing factor (along the time dimension)
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
# to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
plt.hist(pow_delta_fit, 100, density=True, histtype='stepfilled', alpha=0.4)  # 'density' replaces the removed 'normed' kwarg in matplotlib >= 3
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
# by nrem_idx; if pnrem_hidden == 1, then all threshold criteria, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
# Delta power larger than high threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
# we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
:param recordings: list of strings, recordings
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
:param overlap: float between 0 and 100; specifies the percentage by which the online detected REM period has to
overlap with the real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth")
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
:param overlap: percentage (number between 0 and 100). Defines by how much (in percent)
a true (offline annotated) REM period must overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
:param ma_thr: microarousal threshold in seconds; wake sequences shorter than $ma_thr are counted as NREM
:return: df, if single_mode == True $df is a pandas DataFrame:
REM iREM laser
mouse - mouse ID
REM - REM duration
iREM - inter REM duration after REM periods with laser
laser - 'y' or 'n'; depending on whether laser was on during REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, mouse is the data frame index
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth")
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
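# Worked example:
#   get_sequences(np.array([0, 1, 2, 5, 6, 10]), ibreak=1)
#   # -> [array([0, 1, 2]), array([5, 6]), array([10])]
# gaps larger than $ibreak split the index vector into separate sequences.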
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
tmp = seq - i
d = np.where(tmp < 0)[0]
if len(d) > 0:
id = seq[d[-1]]
else:
id = 0
return id
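# Example: closest_precessor(np.array([2, 5, 9]), 7) returns 5; if no
# element of seq precedes i, the function returns 0.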
def write_remidx(M, K, ppath, name, mode=1) :
"""
write_remidx(M, K, ppath, name, mode=1)
write the sleep state sequence M together with the annotation
flags K to the remidx file of recording $name
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: use also sigma band: that's very helpful to classify pre-REM periods
as NREM; otherwise they tend to be classified as wake.
Gamma power nicely picks up microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
:param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std * std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
:param pspec_norm: boolean, if True, normalize EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency Bands/Ranges for delta, theta, and, gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]),
duration[1,0], duration[1,1], 1)
seq['high_delta'] = threshold_crossing(p_delta, np.nanmean(p_delta[use_idx]), duration[2,0], duration[2,1], 1)
seq['high_sigma'] = threshold_crossing(p_sigma, np.nanmean(p_sigma[use_idx]), duration[3,0], duration[3,1], 1)
seq['high_gamma'] = threshold_crossing(p_gamma, np.nanmean(p_gamma[use_idx]), duration[4,0], duration[4,1], 1)
# Sleep-State Rules
idx = {}
for k in seq:
tmp = [list(range(i,j+1)) for (i,j) in seq[k]]
# no idea why this works to flatten a list
# idx[k] = sum(tmp, [])
# alternative that I understand:
if len(tmp) == 0:
idx[k] = np.array([])
else:
idx[k] = np.array(reduce(lambda x,y: x+y, tmp))
idx['low_emg'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_emg']))
idx['low_delta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_delta']))
idx['low_theta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_theta']))
#REM Sleep: thdel up, emg down, delta down
a = np.intersect1d(idx['high_theta'], idx['low_delta'])
# non high_emg phases
b = np.setdiff1d(a, idx['high_emg'])
rem = get_sequences(b, duration[0,1])
rem_idx = reduce(lambda x,y: np.concatenate((x,y)), rem)
# SWS Sleep
# delta high, no theta, no emg
a = np.setdiff1d(idx['high_delta'], idx['high_emg']) # no emg activation
b = np.setdiff1d(a, idx['high_theta']) # no theta;
sws = get_sequences(b)
sws_idx = reduce(lambda x,y: np.concatenate((x,y)), sws)
#print a
# Wake
# low delta + high emg and not rem
a = np.unique(np.union1d(idx['low_delta'], idx['high_emg']))
b = np.setdiff1d(a, rem_idx)
wake = get_sequences(b)
wake_idx = reduce(lambda x,y: np.concatenate((x,y)), wake)
# sequences with low delta, high sigma and low emg are NREM
a = np.intersect1d(np.intersect1d(idx['high_sigma'], idx['low_delta']), idx['low_emg'])
a = np.setdiff1d(a, rem_idx)
sws_idx = np.unique(np.union1d(a, sws_idx))
wake_idx = np.setdiff1d(wake_idx, a)
#NREM sequences with high gamma are wake
a = np.intersect1d(sws_idx, idx['high_gamma'])
sws_idx = np.setdiff1d(sws_idx, a)
wake_idx = np.unique(np.union1d(wake_idx,a))
# Wake and Theta
wake_motion_idx = np.intersect1d(wake_idx, idx['high_theta'])
# Wake w/o Theta
wake_nomotion_idx = np.setdiff1d(wake_idx, idx['high_theta'])
# Are there overlapping sequences?
a = np.intersect1d(np.intersect1d(rem_idx, wake_idx), sws_idx)
# Are there undefined sequences?
undef_idx = np.setdiff1d(np.setdiff1d(np.setdiff1d(np.arange(0,N), rem_idx), wake_idx), sws_idx)
# Wake wins over SWS
sws_idx = np.setdiff1d(sws_idx, wake_idx)
# Special rules
# if there's a REM sequence directly following a short wake sequence (PRE_WAKE_REM),
# this wake sequence goes to SWS
# NREM to REM transitions are sometimes mistaken as quiet wake periods
for rem_seq in rem:
if len(rem_seq) > 0:
irem_start = rem_seq[0]
# is there wake in the preceding bin?
if irem_start-1 in wake_idx:
# get the closest sws bin in the preceding history
isws_end = closest_precessor(sws_idx, irem_start)
if (irem_start - isws_end)*dt < PRE_WAKE_REM:
new_rem = np.arange(isws_end+1,irem_start)
rem_idx = np.union1d(rem_idx, new_rem)
wake_idx = np.setdiff1d(wake_idx, new_rem)
else:
new_wake = rem_seq
wake_idx = np.union1d(wake_idx, new_wake)
rem_idx = np.setdiff1d(rem_idx, new_wake)
# two different representations for the results:
S = {}
S['rem'] = rem_idx
S['nrem'] = sws_idx
S['wake'] = wake_idx
S['awake'] = wake_motion_idx
S['qwake'] = wake_nomotion_idx
M = np.zeros((N,))
if len(rem_idx) > 0:
M[rem_idx] = 1
if len(wake_idx) > 0:
M[wake_idx] = 2
if len(sws_idx) > 0:
M[sws_idx] = 3
if len(undef_idx) > 0:
M[undef_idx] = 0
# write sleep annotation to file
if pwrite:
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
print("writing annotation to %s" % outfile)
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M,np.zeros((N,)))]
f.writelines(s)
f.close()
# nice plotting
plt.ion()
if pplot:
plt.figure(figsize=(18,9))
axes1=plt.axes([0.1, 0.9, 0.8, 0.05])
A = np.zeros((1,len(M)))
A[0,:] = M
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,0,0], [0,1,1],[0.5,0,1], [0.8, 0.8, 0.8]], 4)
#tmp = axes1.imshow(A, vmin=0, vmax=3)
tmp = axes1.pcolorfast(t, [0,1], A, vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes1.axis('tight')
tmp.axes.get_xaxis().set_visible(False)
tmp.axes.get_yaxis().set_visible(False)
box_off(axes1)
# show spectrogram
axes2=plt.axes([0.1, 0.75, 0.8, 0.1], sharex=axes1)
ifreq = np.where(freq <= 30)[0]
med = np.median(SPEEG.max(axis=0))
if pspec_norm:
ifreq = np.where(freq <= 80)[0]
filt = np.ones((6, 1))
filt = filt / np.sum(filt)
SPEEG = scipy.signal.convolve2d(SPEEG, filt, mode='same')
spec_mean = SPEEG.mean(axis=1)
SPEEG = np.divide(SPEEG, np.repeat([spec_mean], SPEEG.shape[1], axis=0).T)
med = np.median(SPEEG.max(axis=0))
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax = med*vmax, cmap='jet')
else:
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax=med * vmax, cmap='jet')
axes2.axis('tight')
plt.ylabel('Freq (Hz)')
box_off(axes2)
# show delta power
axes3=plt.axes([0.1, 0.6, 0.8, 0.1], sharex=axes2)
axes3.plot(t,p_delta, color='gray')
plt.ylabel('Delta (a.u.)')
plt.xlim((t[0], t[-1]))
seq = get_sequences(S['nrem'])
#for s in seq:
# plt.plot(t[s],p_delta[s], color='red')
s = idx['high_delta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_delta[s], color='red')
box_off(axes3)
axes4=plt.axes([0.1, 0.45, 0.8, 0.1], sharex=axes3)
axes4.plot(t,p_sigma, color='gray')
plt.ylabel('Sigma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_sigma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_sigma[s], color='red')
box_off(axes4)
axes5=plt.axes([0.1, 0.31, 0.8, 0.1], sharex=axes4)
axes5.plot(t,th_delta, color='gray')
plt.ylabel('Th/Delta (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_theta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],th_delta[s], color='red')
box_off(axes5)
axes6=plt.axes([0.1, 0.17, 0.8, 0.1], sharex=axes5)
axes6.plot(t,p_gamma, color='gray')
plt.ylabel('Gamma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_gamma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_gamma[s], color='red')
box_off(axes6)
axes7=plt.axes([0.1, 0.03, 0.8, 0.1], sharex=axes6)
axes7.plot(t,p_mu, color='gray')
plt.xlabel('Time (s)')
plt.ylabel('EMG (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_emg']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_mu[s], color='red')
box_off(axes7)
plt.show()
# 2nd figure showing distribution of different bands
plt.figure(figsize=(20,3))
axes1 = plt.axes([0.05, 0.1, 0.13, 0.8])
plt.hist(p_delta, bins=100)
plt.plot(np.nanmean(p_delta)
import pickle as pkl
from collections import Counter
import numpy as np
import nltk
class DailyDialogCorpus(object):
def __init__(self, corpus_path="data/dailydialog/dailydialog_split.pkl",
max_vocab_cnt=30000, word2vec=True, word2vec_dim=None):
self.word_vec = word2vec
self.word2vec_dim = word2vec_dim
self.word2vec = None
data = pkl.load(open(corpus_path, "rb"))
self.train_data = data["train"]
self.valid_data = data["valid"]
self.test_data = data["test"]
print("DailyDialog Statistics: ")
print("train data size: %d" % len(self.train_data))
print("valid data size: %d" % len(self.valid_data))
print("test data size: %d" % len(self.test_data))
print("\n")
# DailyDialog Statistics:
# train data size: 10117
# valid data size: 1500
# test data size: 1500
self.train_corpus = self.process(self.train_data)
self.valid_corpus = self.process(self.valid_data)
self.test_corpus = self.process(self.test_data)
print(" [*] Building word vocabulary.")
self.build_vocab(max_vocab_cnt)
print(" [*] Loading word2vec.")
self.load_word2vec()
def process(self, data):
new_meta = []
new_dialog = []
all_lenes = []
new_utts = []
for obj in data:
topic = obj["topic"]
dial = obj["utts"]
lower_utts = [
(
item["floor"],
# ["<s>"] + item["text"].lower().strip().split(" ") + ["</s>"],
["<s>"] + nltk.WordPunctTokenizer().tokenize(item["text"].lower().strip()) + ["</s>"],
(item["act"], item["emot"])
) for item in dial]
# first
all_lenes.extend([len(u) for c, u, f in lower_utts])
# second
new_utts.extend([utt for floor, utt, feat in lower_utts])
# third
dialog = [(utt, floor, feat) for floor, utt, feat in lower_utts]
new_dialog.append(dialog)
# fourth
meta = (topic,)
new_meta.append(meta)
print("max_utt_len %d, min_utt_len %d, mean_utt_len %.4f" % \
(np.max(all_lenes),np.min(all_lenes), float(np.mean(all_lenes))))
# Max utt len 298, Min utt len 3, Mean utt len 16.54
# Max utt len 156, Min utt len 3, Mean utt len 16.83
# Max utt len 232, Min utt len 3, Mean utt len 16.80
return {"dialog": new_dialog, "meta": new_meta, "utts": new_utts}
def build_vocab(self, max_vocab_cnt):
all_words = []
for tokens in self.train_corpus["utts"]:
all_words.extend(tokens)
vocab_count = Counter(all_words).most_common()
raw_vocab_size = len(vocab_count)
discard_wc = np.sum([c for t, c in vocab_count[max_vocab_cnt:]])
vocab_count = vocab_count[0:max_vocab_cnt]
self.vocab = ["<pad>", "<unk>"] + [t for t, cnt in vocab_count] # list
self.rev_vocab = self.word2idx = {word:idx for idx, word in enumerate(self.vocab)} # dict
self.idx2word = {idx:word for idx, word in enumerate(self.vocab)} # dict
self.pad_id = self.word2idx["<pad>"]
self.unk_id = self.word2idx["<unk>"]
self.sos_id = self.word2idx["<s>"]
self.eos_id = self.word2idx["</s>"]
self.vocab_size = len(self.vocab)
print("raw_vocab_size %d, actual_vocab_size %d, at cut_off frequent %d OOV rate %f"
% (raw_vocab_size, self.vocab_size,
vocab_count[-1][1],
float(discard_wc) / len(all_words)))
print("<pad> index %d" % self.pad_id)
print("<unk> index %d" % self.unk_id)
print("<s> index %d" % self.sos_id)
print("</s> index %d" % self.eos_id)
print("\n")
print("Building topic vocabulary...")
all_topics = []
for topic, in self.train_corpus["meta"]:
all_topics.append(topic)
self.topic_vocab = [t for t, cnt in Counter(all_topics).most_common()]
self.rev_topic_vocab = {t: idx for idx, t in enumerate(self.topic_vocab)}
print("number of topics: %d" % len(self.topic_vocab))
print(self.topic_vocab)
print("\n")
all_dialog_acts = []
all_emots = []
for dialog in self.train_corpus["dialog"]:
all_dialog_acts.extend([feat[0] for floor, utt, feat in dialog if feat is not None])
all_emots.extend([feat[1] for floor, utt, feat in dialog if feat is not None])
print("Building act vocabulary...")
self.dialog_act_vocab = [t for t, cnt in Counter(all_dialog_acts).most_common()]
self.rev_dialog_act_vocab = {t: idx for idx, t in enumerate(self.dialog_act_vocab)}
print("number of acts: %d" % len(self.dialog_act_vocab))
print(self.dialog_act_vocab)
print("\n")
print("Building emotion vocabulary...")
self.dialog_emot_vocab = [t for t, cnt in Counter(all_emots).most_common()]
self.rev_dialog_emot_vocab = {t: idx for idx, t in enumerate(self.dialog_emot_vocab)}
print("number of emots: %d" % len(self.dialog_emot_vocab))
print(self.dialog_emot_vocab)
print("\n")
def load_word2vec(self):
if self.word_vec is False:
print(" [*] No word2vec provided.")
return None
with open("data/glove.twitter.27B.200d.txt", "r") as f:
lines = f.readlines()
raw_word2vec = {}
for l in lines:
w, vec = l.split(" ", 1)
raw_word2vec[w] = vec
self.word2vec = None
oov_cnt = 0
for word in self.vocab:
str_vec = raw_word2vec.get(word, None)
if str_vec is None:
oov_cnt += 1
vec = np.random.randn(self.word2vec_dim) * 0.1
else:
vec = np.fromstring(str_vec, sep=" ")
vec = np.expand_dims(vec, axis=0)
self.word2vec = np.concatenate((self.word2vec, vec),0) if self.word2vec is not None else vec
print(" [*] word2vec shape: ")
print(self.word2vec.shape)
print(" [*] word2vec cannot cover %f vocab" % (float(oov_cnt)/len(self.vocab)))
return self.word2vec
def get_dialog_corpus(self):
def _to_id_corpus(data):
results = []
for dialog in data:
temp = []
for utt, floor, feat in dialog:
if feat is not None:
id_feat = list(feat)
id_feat[0] = self.rev_dialog_act_vocab[feat[0]]
id_feat[1] = self.rev_dialog_emot_vocab[feat[1]]
else:
id_feat = None
# temp.append(([self.rev_vocab.get(t, self.unk_id) for t in utt], floor, id_feat))
temp.append((utt, floor, id_feat))
results.append(temp)
return results
id_train = _to_id_corpus(self.train_corpus["dialog"])
id_valid = _to_id_corpus(self.valid_corpus["dialog"])
id_test = _to_id_corpus(self.test_corpus["dialog"])
return {'train': id_train, 'valid': id_valid, 'test': id_test}
def get_meta_corpus(self):
def _to_id_corpus(data):
results = []
for (topic,) in data:
results.append((self.rev_topic_vocab[topic]))
return results
id_train = _to_id_corpus(self.train_corpus["meta"])
id_valid = _to_id_corpus(self.valid_corpus["meta"])
id_test = _to_id_corpus(self.test_corpus["meta"])
return {'train': id_train, 'valid': id_valid, 'test': id_test}
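# A minimal usage sketch (the path mirrors the default above; word2vec_dim=200
# is an assumption matching the 200-d GloVe file loaded in load_word2vec):
# corpus = DailyDialogCorpus(max_vocab_cnt=30000, word2vec=True, word2vec_dim=200)
# dialog_corpus = corpus.get_dialog_corpus()  # {"train": ..., "valid": ..., "test": ...}
# meta_corpus = corpus.get_meta_corpus()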
class SWDADialogCorpus(object):
dialog_act_id = 0
sentiment_id = 1
liwc_id = 2
def __init__(self, corpus_path="data/swda/full_swda_clean_42da_sentiment_dialog_corpus.p",
max_vocab_cnt=30000, word2vec=True, word2vec_dim=None):
self.word_vec = word2vec
self.word2vec_dim = word2vec_dim
self.word2vec = None
# self.dialog_id = 0
# self.meta_id = 1
# self.utt_id = 2
# self.sil_utt = ["<s>", "<sil>", "</s>"]
data = pkl.load(open(corpus_path, "rb"))
self.train_corpus = self.process(data["train"])
self.valid_corpus = self.process(data["valid"])
self.test_corpus = self.process(data["test"])
print("SWDA Statistics: ")
print("train data size: %d" % len(self.train_corpus))
print("valid data size: %d" % len(self.valid_corpus))
print("test data size: %d" % len(self.test_corpus))
print("\n")
self.build_vocab(max_vocab_cnt)
self.load_word2vec()
print("Done loading corpus")
def process(self, data):
new_dialog = []
new_meta = []
new_utts = []
# bod_utt = ["<s>", "<d>", "</s>"]
all_lenes = []
for l in data:
lower_utts = [(caller, ["<s>"] + nltk.WordPunctTokenizer().tokenize(utt.lower().strip()) + ["</s>"], feat)
for caller, utt, feat in l["utts"]]
all_lenes.extend([len(u) for c, u, f in lower_utts])
a_age = float(l["A"]["age"])/100.0
b_age = float(l["B"]["age"])/100.0
a_edu = float(l["A"]["education"])/3.0
b_edu = float(l["B"]["education"])/3.0
vec_a_meta = [a_age, a_edu] + ([0, 1] if l["A"]["sex"] == "FEMALE" else [1, 0])
vec_b_meta = [b_age, b_edu] + ([0, 1] if l["B"]["sex"] == "FEMALE" else [1, 0])
# for the joint model we merge both sides of speakers together: if A then it's 0, otherwise 1
meta = (vec_a_meta, vec_b_meta, l["topic"])
dialog = [(utt, int(caller=="B"), feat) for caller, utt, feat in lower_utts]
# dialog = [(bod_utt, 0, None)] + [(utt, int(caller=="B"), feat) for caller, utt, feat in lower_utts]
# new_utts.extend([bod_utt] + [utt for caller, utt, feat in lower_utts])
new_utts.extend([utt for caller, utt, feat in lower_utts])
new_dialog.append(dialog)
new_meta.append(meta)
print("max_utt_len %d, mean_utt_len %.2f, min_utt_len %d" % (
| np.max(all_lenes) | numpy.max |
#!/usr/bin/env python
# coding: utf-8
"""
@author: ackar
Future edits:
- Could add argparse to edit the params of ML2;
depends on how we want to do it though
"""
import os
from MultiLevel2MC import MultiLevel2
import sys
from multiprocessing import Process
import time
import datetime
import numpy as np
import h5py
from scipy import signal
import multiprocessing
# ourSmoothData imported to smooth resp & lvp data
def ourSmoothData(values, halfwin) :
window = 2 * halfwin + 1
weights = np.repeat(1.0, window)
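# The rest of ourSmoothData is cut off here; a plausible completion (an
# assumption, not the original body) is a centered moving average:
# return np.convolve(values, weights / window, 'same')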
import numpy as np
from math import *
import sys
from calc_dmod import calc_lumd
#from calc_kcor import calc_kcor
'''
# get_colors
#
# Takes a list of lines from an SN data file and parses the SN parameters and host colors
# Returns two arrays, one containing arrays of SN peak mag, SALT s, SALT2 x0, x1, and c parameters, and the
# separation of the SN from the host nucleus, and the other containing array pairs of host colors and errors,
# so that plotting can be done easily.
'''
def get_colors(line_list):
mag=[]
mag_err=[]
s=[]
s_err=[]
c=[]
c_err=[]
x0=[]
x0_err=[]
x1=[]
x1_err=[]
sep=[]
u_mag=[]
u_err=[]
g_mag=[]
g_err=[]
r_mag=[]
r_err=[]
i_mag=[]
i_err=[]
z_mag=[]
z_err=[]
for line1 in line_list:
if line1[0]=='#': continue
line=line1.split(',')
if len(line)<2: continue #This is to prevent an error if the line is too short
if line[42]=='0.0': continue #Make sure there is an r-band R_e
redshift=float(line[4])
lumd=calc_lumd(redshift)
dmod=5*np.log10(lumd*10**6)-5
mag.append(float(line[5])-dmod)
if line[6]=='': mag_err.append(0)
else: mag_err.append(float(line[6]))
c.append(float(line[11]))
c_err.append(float(line[12]))
s.append(float(line[13]))
s_err.append(float(line[14]))
sep.append(np.log10(float(line[15])/float(line[42])))
if line[7]=='' or line[9]=='':
x0.append(-99)
x0_err.append(-99)
x1.append(-99)
x1_err.append(-99)
else:
x0.append(float(line[7]))
x0_err.append(float(line[8]))
x1.append(float(line[9]))
x1_err.append(float(line[10]))
u_mag.append(float(line[18]))
u_err.append(float(line[19]))
g_mag.append(float(line[20]))
g_err.append(float(line[21]))
r_mag.append(float(line[22]))
r_err.append(float(line[23]))
i_mag.append(float(line[24]))
i_err.append(float(line[25]))
z_mag.append(float(line[26]))
z_err.append(float(line[27]))
# Convert lists to arrays for manipulation
mag=np.array(mag)
mag_err=np.array(mag_err)
s=np.array(s)
s_err=np.array(s_err)
c=np.array(c)
c_err=np.array(c_err)
x0=np.array(x0)
x0_err=np.array(x0_err)
x1=np.array(x1)
x1_err=np.array(x1_err)
sep=np.array(sep)
u_mag=np.array(u_mag)
u_err=np.array(u_err)
g_mag=np.array(g_mag)
g_err=np.array(g_err)
r_mag=np.array(r_mag)
r_err=np.array(r_err)
i_mag=np.array(i_mag)
i_err=np.array(i_err)
z_mag=np.array(z_mag)
z_err=np.array(z_err)
ug=u_mag-g_mag
ug_err=np.sqrt(u_err**2+g_err**2)
ur=u_mag-r_mag
ur_err=np.sqrt(u_err**2+r_err**2)
ui=u_mag-i_mag
ui_err=np.sqrt(u_err**2+i_err**2)
uz=u_mag-z_mag
uz_err=np.sqrt(u_err**2+z_err**2)
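# A minimal usage sketch (the file name is an assumption; per the docstring,
# get_colors returns SN parameters and host colors):
# with open("sn_data.csv") as f:
#     sn_params, host_colors = get_colors(f.readlines())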
# coding: utf-8
# In[1]:
from __future__ import print_function
import os
os.environ["PATH"] += os.pathsep + 'E:/Graphviz2.38/bin'
import tensorflow as tf
# import keras.backend.tensorflow_backend as KTF
# make only the first GPU visible
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# config = tf.ConfigProto()
# config.gpu_options.allow_growth=True  # do not grab all GPU memory; allocate on demand
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# In[2]:
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D, Dropout, concatenate, Concatenate, GlobalAveragePooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding, Bidirectional, LSTM
from keras.models import Model
from models.custom_metrics import hamming_score, f1
from keras import optimizers, regularizers
from keras.callbacks import EarlyStopping
# import matplotlib.pyplot as plt
import logging, pickle
from models.attention import *
from keras.utils import plot_model
# In[3]:
# conv_units = int(sys.argv[1])
# filter_size = 3
# pooling_size = 3
# dropout_rate = float(sys.argv[2])
# dense_units = int(sys.argv[3])
# max_len = int(sys.argv[4])
conv_units = 1024 #int(sys.argv[1])
filter_size = 3
pooling_size = 3
dropout_rate = 0.6 #float(sys.argv[2])
dense_units = 256 #int(sys.argv[3])
max_len = 800 #int(sys.argv[4])
logging.basicConfig(filename='../res/cnn_feature/{}_{}_{}_{}.log'.format(conv_units, dropout_rate, dense_units, max_len), level=logging.INFO)
BASE_DIR = ''
GLOVE_DIR = '../data/'
# EMBEDDING_FILE = 'glove.6B.100d.txt'
EMBEDDING_FILE = 'glove.6B.100d.txt'
MAX_SEQUENCE_LENGTH = max_len
MAX_NUM_WORDS = 20000
EMBEDDING_DIM = 100
EMBED_INIT_GLOVE = True
FEAT_NUM = 24
ATTR_NUM = 128
MAX_SENTENCE = 40
# In[4]:
train_file = '../data/issuedialog/train.tsv'
valid_file = '../data/issuedialog/valid.tsv'
test_file = '../data/issuedialog/test.tsv'
train_feat_file = '../data/issuedialog/train_features.tsv'
valid_feat_file = '../data/issuedialog/valid_features.tsv'
test_feat_file = '../data/issuedialog/test_features.tsv'
# In[5]:
# first, build index mapping words in the embeddings set to their embedding vector
print('Indexing word vectors.')
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, EMBEDDING_FILE), encoding='utf8') as f:
for line in f:
values = line.split(' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
# In[6]:
# second, prepare text samples and their labels
print('Processing text dataset')
texts = [] # list of text samples
# labels_index = {'OQ': 0, 'OP': 1, 'OF': 2, 'FD': 3, 'FQ': 4, 'CQ': 5, 'AE': 6, 'AC': 7, 'IG': 8, 'CC': 9, 'UF': 10,
# 'PF': 11, 'NF': 12, 'GG': 13, 'JK': 14}
labels_index = {'0': 0, '1': 1}
id2label = {v: k for k, v in labels_index.items()}
classes_num = len(labels_index)
# def load_data_and_labels(data_file):
# x = []
# y = []
# i = 0
# x_dialog, y_dialog = [], []
# with open(data_file, encoding='utf8') as raw_data:
# for line in raw_data:
# i += 1
# # print(i)
# if line != '\n':
# line = line.strip()
# tokens = line.split('\t')
# labels = tokens[3].split('_')
# x.append(tokens[1])
# each_y = [0] * classes_num
# for label in labels:
# each_y[labels_index[label]] = 1
# y.append(each_y)
# return x, y
def load_data_and_labels(data_file):
x = []
y = []
y_issue = []
i = 0
x_dialog, y_dialog = [], []
issue_mark = 0
with open(data_file, encoding='utf8') as raw_data:
for line in raw_data:
i += 1
# print(i)
if line != '\n':
line = line.strip()
tokens = line.split('\t')
labels = tokens[3].split('_')
issue_mark = int(tokens[4])
x_dialog.append(tokens[1])
each_y = [0] * classes_num
for label in labels:
each_y[labels_index[label]] = 1
y_dialog.append(each_y)
else:
# Padding of dialogues.
if len(x_dialog) > MAX_SENTENCE:
x_dialog = x_dialog[0:MAX_SENTENCE]
y_dialog = y_dialog[0:MAX_SENTENCE]
elif len(x_dialog) < MAX_SENTENCE:
x_dialog += [''] * (MAX_SENTENCE - len(x_dialog))
y_dialog += [[1, 0]] * (MAX_SENTENCE - len(y_dialog))
x.append(x_dialog)
y.append(y_dialog)
if issue_mark == 0:
y_issue.append([1, 0])
else:
y_issue.append([0, 1])
x_dialog, y_dialog = [], []
return x, y, y_issue
x_train, y_train, y_train_issue = load_data_and_labels(train_file)
x_valid, y_valid, y_valid_issue = load_data_and_labels(valid_file)
x_test, y_test, y_test_issue = load_data_and_labels(test_file)
# MAX_SEQUENCE_LENGTH = max(max(map(len, x_train)), max(map(len, x_valid)), max(map(len, x_test)))
# print(MAX_SEQUENCE_LENGTH)
labels = np.array(y_train + y_valid + y_test)
print('Found %s texts.' % len(x_train + x_valid + x_test))
# In[7]:
# def load_features(data_file):
# x = []
# i = 0
# x_dialog = []
# with open(data_file, encoding='utf8') as raw_data:
# for line in raw_data:
# i += 1
# # print(i)
# if line != '\n':
# line = line.strip()
# tokens = line.split('\t')
# features = list(map(float, tokens[1].split()))
# x.append(features)
#
# return np.array(x)
def load_features(data_file):
x = []
i = 0
x_dialog = []
len_features = 0
with open(data_file, encoding='utf8') as raw_data:
for line in raw_data:
i += 1
# print(i)
if line != '\n':
line = line.strip()
tokens = line.split('\t')
features = list(map(float, tokens[1].split()))
len_features = len(features)
x_dialog.append(features)
else:
if len(x_dialog) > MAX_SENTENCE:
x_dialog = x_dialog[0:MAX_SENTENCE]
elif len(x_dialog) < MAX_SENTENCE:
x_dialog += [[0] * len_features] * (MAX_SENTENCE - len(x_dialog))
x.append(x_dialog)
x_dialog = []
return np.array(x)
x_train_feat = load_features(train_feat_file)
x_val_feat = load_features(valid_feat_file)
x_test_feat = load_features(test_feat_file)
print('Found %s features.' % len(x_train_feat[0]))
# In[8]:
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
x_train_sample, x_valid_sample, x_test_sample = [], [], []
for each_train in x_train:
x_train_sample += each_train
for each_valid in x_valid:
x_valid_sample += each_valid
for each_test in x_test:
x_test_sample += each_test
tokenizer.fit_on_texts(x_train_sample + x_valid_sample)
sequences = tokenizer.texts_to_sequences(x_train_sample + x_valid_sample + x_test_sample)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
# labels = to_categorical(np.asarray(y_train))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
print('Shape of feature tensor:', x_train_feat.shape)
# In[9]:
print('Preparing embedding matrix.')
# prepare embedding matrix
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
if EMBED_INIT_GLOVE:
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
if i >= MAX_NUM_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH * MAX_SENTENCE,
trainable=True)
else:
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
embeddings_initializer='uniform',
input_length=MAX_SEQUENCE_LENGTH * MAX_SENTENCE)
# In[ ]:
data_group, data_unit = [], []
for i, data_each in enumerate(data):
if i % MAX_SENTENCE == 0 and i > 0:
data_group.append(data_unit)
data_unit = [list(data_each)]
else:
data_unit.append(list(data_each))
data_group.append(data_unit)
num_validation_samples = len(y_valid)
num_test_samples = len(y_test)
num_train_samples = len(y_train)
num_total_samples = len(labels)
x_train = data_group[:num_train_samples]
y_train = labels[:num_train_samples]
x_val = data_group[num_train_samples: num_train_samples + num_validation_samples]
y_val = labels[num_train_samples: num_train_samples + num_validation_samples]
x_test = data_group[-num_test_samples:]
y_test = labels[-num_test_samples:]
assert len(x_train) + len(x_val) + len(x_test) == len(labels)
assert len(y_train) + len(y_val) + len(y_test) == len(labels)
# load sequence features
def load_sequence_features(data_file):
x = []
i = 0
with open(data_file, encoding='utf8') as raw_data:
for line in raw_data:
i += 1
# print(i)
if line != '\n':
line = line.strip()
tokens = line.split('\t')
features = tokens[1].split()
abs_pos = int(features[10])
x.append(abs_pos)
return np.array(x)
x_train_sequence_feat = load_sequence_features(train_feat_file)
x_val_sequence_feat = load_sequence_features(valid_feat_file)
x_test_sequence_feat = load_sequence_features(test_feat_file)
# window_size=3 context information
# def gen_data_with_context(x, x_feat):
# # incorporate the previous and next utterance as context
# num_sample, size_sample = x.shape
# x_trans = np.zeros((num_sample, size_sample * 3), dtype=int)
# for i, abs_pos in enumerate(x_feat):
# if abs_pos == 1:
# if i + 1 < len(x_feat):
# if x_feat[i + 1] == 1:
# x_trans[i] = np.hstack((np.zeros(MAX_SEQUENCE_LENGTH), x[i], np.zeros(MAX_SEQUENCE_LENGTH)))
# else:
# x_trans[i] = np.hstack((np.zeros(MAX_SEQUENCE_LENGTH), x[i], x[i + 1]))
# else:
# x_trans[i] = np.hstack((np.zeros(MAX_SEQUENCE_LENGTH), x[i], np.zeros(MAX_SEQUENCE_LENGTH)))
# elif i == num_sample - 1 or x_feat[i + 1] == 1:
# x_trans[i] = np.hstack((x[i - 1], x[i], np.zeros(MAX_SEQUENCE_LENGTH)))
# else:
# x_trans[i] = np.hstack((x[i - 1], x[i], x[i + 1]))
# return x_trans
def gen_data_with_context(x):
# incorporate the previous and next utterance as context (shape e.g. 497x40x800)
# x_trans = np.zeros((num_sample, size_sample * 3), dtype=int)
x_trans, x_sentence = [], []
for dialog_embedding in x:
for i, sentence_embedding in enumerate(dialog_embedding):
if i == 0:
x_sentence.append([0] * max_len + sentence_embedding + dialog_embedding[i + 1])
elif i < MAX_SENTENCE - 1:
x_sentence.append(dialog_embedding[i - 1] + sentence_embedding + dialog_embedding[i + 1])
else:
x_sentence.append(dialog_embedding[i - 1] + sentence_embedding + [0] * max_len)
x_trans.append(x_sentence)
x_sentence = []
x_trans = np.array(x_trans)
return x_trans
x_train_with_context = gen_data_with_context(x_train).astype(np.float32)
x_val_with_context = gen_data_with_context(x_val).astype(np.float32)
x_test_with_context = gen_data_with_context(x_test).astype(np.float32)
x_train = np.array(x_train)
x_val = np.array(x_val)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_val = np.array(y_val)
y_test = np.array(y_test)
y_train_issue = np.array(y_train_issue)
y_valid_issue = np.array(y_valid_issue)
y_test_issue = np.array(y_test_issue)
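# The model definition does not appear in this excerpt; a minimal sketch of a
# CNN utterance encoder wired from the hyper-parameters above (the layer wiring
# is an assumption; a fresh Embedding is used because embedding_layer was built
# with input_length=MAX_SEQUENCE_LENGTH * MAX_SENTENCE):
# utt_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")
# h = Embedding(num_words, EMBEDDING_DIM, weights=[embedding_matrix],
#               input_length=MAX_SEQUENCE_LENGTH, trainable=True)(utt_input)
# h = Conv1D(conv_units, filter_size, activation="relu")(h)
# h = MaxPooling1D(pooling_size)(h)
# h = GlobalMaxPooling1D()(h)
# h = Dropout(dropout_rate)(h)
# output = Dense(classes_num, activation="softmax")(h)
# model = Model(utt_input, output)
# model.compile(optimizer=optimizers.Adam(), loss="categorical_crossentropy", metrics=[f1])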
""" SciANN-SolidMechanics.py
Description:
SciANN code for solution and discovery of solid mechanics from data.
For additional details, please check our paper at: https://arxiv.org/abs/2003.02751
Created by <NAME> on 2/14/20.
"""
import os, sys, time
import numpy as np
from sciann.utils.math import diff
from sciann import SciModel, Functional, Parameter
from sciann import Data, Tie
from sciann import Variable, Field
import matplotlib.pyplot as plt
import argparse
pi = np.pi
# current file name.
current_file_name = os.path.basename(__file__).split(".")[0]
# Lame paramters used in the paper.
lmbd = 1.0
mu = 0.5
qload = 4.0
# Input interface for python.
parser = argparse.ArgumentParser(description='''
SciANN code for solution and discovery of solid mechanics from data. \n
For additional details, please check our paper at: https://arxiv.org/submit/3042511'''
)
# Define number of data points.
parser.add_argument('-l', '--layers', help='Num layers and neurons (default 4 layers each 40 neurons [40, 40, 40, 40])', type=int, nargs='+', default=[40]*4)
parser.add_argument('-af', '--actf', help='Activation function (default tanh)', type=str, nargs=1, default=['tanh'])
parser.add_argument('-nx', '--numx', help='Num Node in X (default 40)', type=int, nargs=1, default=[20])
parser.add_argument('-ny', '--numy', help='Num Node in Y (default 40)', type=int, nargs=1, default=[20])
parser.add_argument('-bs', '--batchsize', help='Batch size for Adam optimizer (default 32)', type=int, nargs=1, default=[32])
parser.add_argument('-e', '--epochs', help='Maximum number of epochs (default 2000)', type=int, nargs=1, default=[5000])
parser.add_argument('-lr', '--learningrate', help='Initial learning rate (default 0.001)', type=float, nargs=1, default=[0.001])
parser.add_argument('-in', '--independent_networks', help='Use independent networks for each var (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('-v', '--verbose', help='Show training progress (default 2) (check Keras.fit)', type=int, nargs=1, default=[2])
parser.add_argument('--shuffle', help='Shuffle data for training (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('--stopafter', help='Patience argument from Keras (default 500)', type=int, nargs=1, default=[500])
parser.add_argument('--savefreq', help='Frequency to save weights (each n-epoch)', type=int, nargs=1, default=[100000])
parser.add_argument('--dtype', help='Data type for weights and biases (default float64)', type=str, nargs=1, default=['float64'])
parser.add_argument('--gpu', help='Use GPU if available (default False)', type=bool, nargs=1, default=[False])
parser.add_argument('-op', '--outputpath', help='Output path (default ./file_name)', type=str, nargs=1, default=['output'])
parser.add_argument('-of', '--outputprefix', help='Output path (default res**)', type=str, nargs=1, default=['res'])
parser.add_argument('-nxp', '--numxplot', help='Num Node in X for ploting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('-nyp', '--numyplot', help='Num Node in Y for ploting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('--plot', help='Plot the model', nargs='?', default=False)
args = parser.parse_args()
if not args.gpu[0]:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def load(xx):
x, y = xx[0], xx[1]
Q = qload
return Q * np.sin(pi*x)
def bodyfx(xx):
x, y = xx[0], xx[1]
Q = qload
frc = - lmbd*(4*pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- mu*(pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- 8*mu*pi**2*np.cos(2*pi*x)*np.sin(pi*y)
return frc
def bodyfy(xx):
x, y = xx[0], xx[1]
Q = qload
frc = lmbd*(3*Q*y**2*np.sin(pi*x) - 2*pi**2*np.cos(pi*y)*np.sin(2*pi*x)) \
- mu*(2*pi**2*np.cos(pi*y)*np.sin(2*pi*x) + (Q*y**4*pi**2*np.sin(pi*x))/4) \
+ 6*Q*mu*y**2*np.sin(pi*x)
return frc
def dispx(xx):
x, y = xx[0], xx[1]
return np.cos(2*pi*x) * np.sin(pi*y)
def dispy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x) * Q * y**4/4
def strainxx(xx):
x, y = xx[0], xx[1]
Q = qload
return -2*pi*np.sin(2*pi*x)*np.sin(pi*y)
def strainyy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x)*Q*y**3
def strainxy(xx):
x, y = xx[0], xx[1]
Q = qload
return 0.5*(pi*np.cos(2*pi*x)*np.cos(pi*y) + pi*np.cos(pi*x)*Q*y**4/4)
def stressxx(xx):
return (lmbd+2*mu)*strainxx(xx) + lmbd*strainyy(xx)
def stressyy(xx):
return (lmbd+2*mu)*strainyy(xx) + lmbd*strainxx(xx)
def stressxy(xx):
return 2.0*mu*strainxy(xx)
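# A quick sanity check (an illustration, not part of the original script): by
# construction the analytic stresses satisfy d(Sxx)/dx + d(Sxy)/dy = bodyfx,
# which can be verified with finite differences:
# xs = np.linspace(0.0, 1.0, 201)
# ys = np.linspace(0.0, 1.0, 201)
# X, Y = np.meshgrid(xs, ys)
# residual = (np.gradient(stressxx([X, Y]), xs, axis=1)
#             + np.gradient(stressxy([X, Y]), ys, axis=0)
#             - bodyfx([X, Y]))
# print(np.abs(residual).max())  # O(h**2): small, but not machine precision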
def cust_pcolor(AX, X, Y, C, title):
im = AX.pcolor(X, Y, C, cmap="jet")
AX.axis("equal")
AX.axis("off")
AX.set_title(title, fontsize=14)
plt.colorbar(im, ax=AX)
def cust_semilogx(AX, X, Y, xlabel, ylabel):
if X is None:
im = AX.semilogy(Y)
else:
im = AX.semilogy(X, Y)
if xlabel is not None: AX.set_xlabel(xlabel)
if ylabel is not None: AX.set_ylabel(ylabel)
def train():
# define output folder.
if not os.path.isdir(args.outputpath[0]):
os.mkdir(args.outputpath[0])
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = output_file_name + "_{}_".format(args.actf[0]) + "x".join([str(x) for x in args.layers])
# Neural Network Setup.
x = Variable("x", dtype=args.dtype[0])
y = Variable("y", dtype=args.dtype[0])
if args.independent_networks[0]:
Uxy = Functional("Uxy", [x, y], args.layers, args.actf[0])
Vxy = Functional("Vxy", [x, y], args.layers, args.actf[0])
Sxx = Functional("Sxx", [x, y], args.layers, args.actf[0])
Syy = Functional("Syy", [x, y], args.layers, args.actf[0])
Sxy = Functional("Sxy", [x, y], args.layers, args.actf[0])
else:
Uxy, Vxy, Sxx, Syy, Sxy = Functional(
["Uxy", "Vxy", "Sxx", "Syy", "Sxy"],
[x, y],
args.layers, args.actf[0]).split()
lame1 = Parameter(2.0, inputs=[x,y], name="lame1")
lame2 = Parameter(2.0, inputs=[x,y], name="lame2")
C11 = (2*lame2 + lame1)
C12 = lame1
C33 = 2*lame2
Exx = diff(Uxy, x)
Eyy = diff(Vxy, y)
Exy = (diff(Uxy, y) + diff(Vxy, x))*0.5
# Define constraints
d1 = Data(Uxy)
d2 = Data(Vxy)
d3 = Data(Sxx)
d4 = Data(Syy)
d5 = Data(Sxy)
c1 = Tie(Sxx, Exx*C11 + Eyy*C12)
c2 = Tie(Syy, Eyy*C11 + Exx*C12)
c3 = Tie(Sxy, Exy*C33)
Lx = diff(Sxx, x) + diff(Sxy, y)
Ly = diff(Sxy, x) + diff(Syy, y)
# Define the optimization model (set of inputs and constraints)
model = SciModel(
inputs=[x, y],
targets=[d1, d2, d3, d4, d5, c1, c2, c3, Lx, Ly],
loss_func="mse"
)
with open("{}_summary".format(fname), "w") as fobj:
model.summary(print_fn=lambda x: fobj.write(x + '\n'))
# Prepare training data
## Training grid
XMIN, XMAX = 0.0, 1.0
YMIN, YMAX = 0.0, 1.0
Xmesh = np.linspace(XMIN, XMAX, args.numx[0]).reshape((-1, 1))
Ymesh = np.linspace(YMIN, YMAX, args.numy[0]).reshape((-1, 1))
X, Y = np.meshgrid(Xmesh, Ymesh)
input_data = [X.reshape(-1, 1), Y.reshape(-1, 1)]
# Assuing data is known only at boundary conditions
XTOL, YTOL = np.array([XMAX-XMIN, YMAX-YMIN])*1e-6
left_ids = np.where(abs(input_data[0] - XMIN) < XTOL)[0]
right_ids = np.where(abs(input_data[0] - XMAX) < XTOL)[0]
bot_ids = np.where(abs(input_data[1] - YMIN) < YTOL)[0]
top_ids = np.where(abs(input_data[1] - YMAX) < YTOL)[0]
BC_ids = np.unique(np.concatenate([left_ids, right_ids, bot_ids, top_ids]))
## data associated to constrains defined earlier
# Define constraints
data_d1 = dispx(input_data)
data_d2 = dispy(input_data)
data_d3 = stressxx(input_data)
data_d4 = stressyy(input_data)
data_d5 = stressxy(input_data)
data_c1 = 'zeros'
data_c2 = 'zeros'
data_c3 = 'zeros'
data_Lx = bodyfx(input_data)
data_Ly = bodyfy(input_data)
target_data = [(BC_ids, data_d1), #BC: Ux - only applied at BC_ids
(BC_ids, data_d2), #BC: Uy - only applied at BC_ids
(BC_ids, data_d3), #BC: Sxx - only applied at BC_ids
(BC_ids, data_d4), #BC: Syy - only applied at BC_ids
(BC_ids, data_d5), #BC: Sxy - only applied at BC_ids
data_c1, data_c2, data_c3, #Impose the constitutive model at all test points
data_Lx, data_Ly] #Impose the body force at all test points
# Train the model
training_time = time.time()
history = model.train(
x_true=input_data,
y_true=target_data,
epochs=args.epochs[0],
batch_size=args.batchsize[0],
shuffle=args.shuffle[0],
learning_rate=args.learningrate[0],
stop_after=args.stopafter[0],
verbose=args.verbose[0],
save_weights_to="{}_WEIGHTS".format(fname),
save_weights_freq=args.savefreq[0]
)
training_time = time.time() - training_time
for loss in history.history:
np.savetxt(fname+"_{}".format("_".join(loss.split("/"))),
np.array(history.history[loss]).reshape(-1, 1))
time_steps = np.linspace(0, training_time, len(history.history["loss"]))
np.savetxt(fname+"_Time", time_steps.reshape(-1,1))
# Post process the trained model.
Xmesh_plot = np.linspace(XMIN, XMAX, args.numxplot[0]).reshape((-1, 1))
Ymesh_plot = np.linspace(YMIN, YMAX, args.numyplot[0])
# importing necessary modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# reading the csv from github
df = pd.read_csv('https://raw.githubusercontent.com/leo-ventura/programming-languages/master/data.csv')
# we have to separate the programming languages from the users (languages as our x, users as our y)
features = ['assembly','batchfile','c','c#','c++','clojure','coffeescript','css','elixir','emacs lisp','go','haskell','html','java','javascript','jupyter notebook','kotlin','lua','matlab','objective-c','objective-c++','ocaml','perl','php','powershell','purebasic','python','rascal','ruby','rust','scala','shell','swift','tex','typescript','vim script','vue']
x = df.loc[:,features].values
# to be used in the plot
marker = itertools.cycle(('X', 'o', '*'))
# creating a vector whose rows sum to 1
X_sum1 = np.copy(x)
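# The script is cut off here; a plausible continuation, assuming X_sum1 is meant
# to hold rows normalized to sum to 1 before PCA (an assumption based on the
# variable name; all-zero rows are left as zeros):
# row_sums = X_sum1.sum(axis=1, keepdims=True)
# X_sum1 = np.divide(X_sum1, row_sums, out=np.zeros(X_sum1.shape), where=row_sums != 0)
# pca = PCA(n_components=3)
# components = pca.fit_transform(X_sum1)
# print(pca.explained_variance_ratio_)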